diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 37ea49e3a6d95..19e99852869e6 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -56,7 +56,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["8.15.4", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 788960c76e150..7dd8269f4ffe6 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -288,8 +288,8 @@ steps: env: BWC_VERSION: 8.15.4 - - label: "{{matrix.image}} / 8.16.0 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.0 + - label: "{{matrix.image}} / 8.16.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.1 timeout_in_minutes: 300 matrix: setup: @@ -302,7 +302,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.16.0 + BWC_VERSION: 8.16.1 - label: "{{matrix.image}} / 8.17.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 7b6a6ea72fe83..79371d6ddccf5 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -306,8 +306,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.16.0 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.0#bwcTest + - label: 8.16.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.1#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -316,7 +316,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.16.0 + BWC_VERSION: 8.16.1 retry: automatic: - exit_status: "-1" @@ -429,7 +429,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk21 - BWC_VERSION: ["8.15.4", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -471,7 +471,7 @@ steps: ES_RUNTIME_JAVA: - openjdk21 - openjdk23 - BWC_VERSION: ["8.15.4", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/scripts/gradle-configuration-cache-validation.sh b/.buildkite/scripts/gradle-configuration-cache-validation.sh index 8249155c5ffc5..55a4b18a1e887 100755 --- a/.buildkite/scripts/gradle-configuration-cache-validation.sh +++ b/.buildkite/scripts/gradle-configuration-cache-validation.sh @@ -2,18 +2,17 @@ set -euo pipefail -# TODO/ FIXIT without a full resolved gradle home, we see issues configuration cache reuse -./gradlew --max-workers=8 --parallel --scan --no-daemon precommit +# This is a workaround for https://github.com/gradle/gradle/issues/28159 +.ci/scripts/run-gradle.sh --no-daemon precommit -./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 +.ci/scripts/run-gradle.sh --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 # Create a temporary file tmpOutputFile=$(mktemp) trap "rm $tmpOutputFile" 
EXIT echo "2nd run" -# TODO run-gradle.sh script causes issues because of init script handling -./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 | tee $tmpOutputFile +.ci/scripts/run-gradle.sh --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 | tee $tmpOutputFile # Check if the command was successful if grep -q "Configuration cache entry reused." $tmpOutputFile; then diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 2e77631450825..85522e47a523f 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -15,6 +15,6 @@ BWC_VERSION: - "8.13.4" - "8.14.3" - "8.15.4" - - "8.16.0" + - "8.16.1" - "8.17.0" - "9.0.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index c6edc709a8ceb..9ea3072021bb3 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - - "8.15.4" - - "8.16.0" + - "8.16.1" - "8.17.0" - "9.0.0" diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 3a066be2f8ea9..6f2dc3c64febe 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -472,7 +472,7 @@ You can run a group of YAML test by using wildcards: --tests "org.elasticsearch.test.rest.ClientYamlTestSuiteIT.test {yaml=index/*/*}" --------------------------------------------------------------------------- -or +or --------------------------------------------------------------------------- ./gradlew :rest-api-spec:yamlRestTest \ @@ -564,8 +564,8 @@ Sometimes a backward compatibility change spans two versions. A common case is a new functionality that needs a BWC bridge in an unreleased versioned of a release branch (for example, 5.x). Another use case, since the introduction of serverless, is to test BWC against main in addition to the other released branches. To do so, specify the `bwc.refspec` remote and branch to use for the BWC build as `origin/main`. -To test against main, you will also need to create a new version in link:./server/src/main/java/org/elasticsearch/Version.java[Version.java], -increment `elasticsearch` in link:./build-tools-internal/version.properties[version.properties], and hard-code the `project.version` for ml-cpp +To test against main, you will also need to create a new version in link:./server/src/main/java/org/elasticsearch/Version.java[Version.java], +increment `elasticsearch` in link:./build-tools-internal/version.properties[version.properties], and hard-code the `project.version` for ml-cpp in link:./x-pack/plugin/ml/build.gradle[ml/build.gradle]. In general, to test the changes, you can instruct Gradle to build the BWC version from another remote/branch combination instead of pulling the release branch from GitHub. 
@@ -625,7 +625,7 @@ For specific YAML rest tests one can use For disabling entire types of tests for subprojects, one can use for example: ------------------------------------------------ -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm) { // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index f3ced9f16d327..25cfae6c9803a 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -1,4 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.TestUtil /* @@ -78,7 +77,7 @@ tasks.register("copyPainless", Copy) { } tasks.named("run").configure { - executable = "${BuildParams.runtimeJavaHome}/bin/java" + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" args << "-Dplugins.dir=${buildDir}/plugins" << "-Dtests.index=${buildDir}/index" dependsOn "copyExpression", "copyPainless", configurations.nativeLib systemProperty 'es.nativelibs.path', TestUtil.getTestLibraryPath(file("../libs/native/libraries/build/platform/").toString()) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java index d3f210f774782..74cea5d5f1549 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java @@ -10,7 +10,6 @@ package org.elasticsearch.benchmark.index.mapper; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -28,7 +27,6 @@ import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.script.Script; @@ -56,13 +54,7 @@ public static MapperService create(String mappings) { MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); SimilarityService similarityService = new SimilarityService(indexSettings, null, Map.of()); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); MapperService mapperService = new MapperService( () -> TransportVersion.current(), indexSettings, diff --git a/build-conventions/build.gradle b/build-conventions/build.gradle index d8c211c0f02f9..b0eda5a34065a 100644 --- a/build-conventions/build.gradle +++ b/build-conventions/build.gradle @@ -12,9 +12,6 @@ import org.gradle.plugins.ide.eclipse.model.SourceFolder buildscript { repositories { - maven { - url 'https://jitpack.io' - } mavenCentral() } } @@ -70,10 +67,6 @@ gradlePlugin { } repositories { - maven { - url 'https://jitpack.io' - } - mavenCentral() 
gradlePluginPortal() } diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GUtils.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GUtils.java index 9a35aa41ba1e5..0b04496866ca9 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GUtils.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GUtils.java @@ -16,4 +16,12 @@ public abstract class GUtils { public static String capitalize(String s) { return s.substring(0, 1).toUpperCase(Locale.ROOT) + s.substring(1); } + + public static <T> T elvis(T given, T fallback) { + if (given == null) { + return fallback; + } else { + return given; + } + } } diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java index cd13743ee0746..c3124812e5089 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java @@ -9,12 +9,14 @@ package org.elasticsearch.gradle.internal.conventions; -import org.elasticsearch.gradle.internal.conventions.precommit.PomValidationPrecommitPlugin; +import groovy.util.Node; + import com.github.jengelman.gradle.plugins.shadow.ShadowExtension; import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin; -import groovy.util.Node; -import org.elasticsearch.gradle.internal.conventions.util.Util; + import org.elasticsearch.gradle.internal.conventions.info.GitInfo; +import org.elasticsearch.gradle.internal.conventions.precommit.PomValidationPrecommitPlugin; +import org.elasticsearch.gradle.internal.conventions.util.Util; import org.gradle.api.NamedDomainObjectSet; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -35,11 +37,12 @@ import org.gradle.api.tasks.bundling.Jar; import org.gradle.initialization.layout.BuildLayout; import org.gradle.language.base.plugins.LifecycleBasePlugin; +import org.w3c.dom.Element; -import javax.inject.Inject; import java.io.File; import java.util.Map; import java.util.concurrent.Callable; +import javax.inject.Inject; public class PublishPlugin implements Plugin<Project> { @@ -64,6 +67,7 @@ public void apply(Project project) { configureSourcesJar(project); configurePomGeneration(project); configurePublications(project); + formatGeneratedPom(project); } private void configurePublications(Project project) { @@ -113,29 +117,32 @@ private void configurePomGeneration(Project project) { var archivesBaseName = providerFactory.provider(() -> getArchivesBaseName(extensions)); var projectVersion = providerFactory.provider(() -> project.getVersion()); var generateMavenPoms = project.getTasks().withType(GenerateMavenPom.class); - generateMavenPoms.configureEach( - pomTask -> pomTask.setDestination( + generateMavenPoms.configureEach(pomTask -> { + pomTask.setDestination( (Callable<String>) () -> String.format( "%s/distributions/%s-%s.pom", projectLayout.getBuildDirectory().get().getAsFile().getPath(), archivesBaseName.get(), projectVersion.get() ) - ) - ); + ); + }); + var publishing = extensions.getByType(PublishingExtension.class); final var mavenPublications = publishing.getPublications().withType(MavenPublication.class); - addNameAndDescriptiontoPom(project, mavenPublications); + addNameAndDescriptionToPom(project, mavenPublications); mavenPublications.configureEach(publication
-> { - // Add git origin info to generated POM files for internal builds - publication.getPom().withXml(xml -> addScmInfo(xml, gitInfo.get())); + publication.getPom().withXml(xml -> { + // Add git origin info to generated POM files for internal builds + addScmInfo(xml, gitInfo.get()); + }); // have to defer this until archivesBaseName is set project.afterEvaluate(p -> publication.setArtifactId(archivesBaseName.get())); generatePomTask.configure(t -> t.dependsOn(generateMavenPoms)); }); } - private void addNameAndDescriptiontoPom(Project project, NamedDomainObjectSet<MavenPublication> mavenPublications) { + private void addNameAndDescriptionToPom(Project project, NamedDomainObjectSet<MavenPublication> mavenPublications) { var name = project.getName(); var description = providerFactory.provider(() -> project.getDescription() != null ? project.getDescription() : ""); mavenPublications.configureEach(p -> p.getPom().withXml(xml -> { @@ -186,4 +193,32 @@ static void configureSourcesJar(Project project) { project.getTasks().named(BasePlugin.ASSEMBLE_TASK_NAME).configure(t -> t.dependsOn(sourcesJarTask)); }); } + + + /** + * Format the generated pom files to be in a sort of reproducible order. + */ + private void formatGeneratedPom(Project project) { + var publishing = project.getExtensions().getByType(PublishingExtension.class); + final var mavenPublications = publishing.getPublications().withType(MavenPublication.class); + mavenPublications.configureEach(publication -> { + publication.getPom().withXml(xml -> { + // Add some pom formatting + formatDependencies(xml); + }); + }); + } + + /** + * just ensure we put dependencies to the end. more a cosmetic thing than anything else + * */ + private void formatDependencies(XmlProvider xml) { + Element rootElement = xml.asElement(); + var dependencies = rootElement.getElementsByTagName("dependencies"); + if (dependencies.getLength() == 1 && dependencies.item(0) != null) { + org.w3c.dom.Node item = dependencies.item(0); + rootElement.removeChild(item); + rootElement.appendChild(item); + } + } } diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index 38d3c0cd326f9..f2a02645f8c09 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -258,9 +258,6 @@ tasks.named('licenseHeaders').configure { *****************************************************************************/ repositories { - maven { - url 'https://jitpack.io' - } mavenCentral() gradlePluginPortal() } @@ -386,10 +383,13 @@ tasks.named("jar") { spotless { java { - // IDEs can sometimes run annotation processors that leave files in - // here, causing Spotless to complain. Even though this path ought not - // to exist, exclude it anyway in order to avoid spurious failures.
- toggleOffOn() + + // workaround for https://github.com/diffplug/spotless/issues/2317 + //toggleOffOn() + target project.fileTree("src/main/java") { + include '**/*.java' + exclude '**/DockerBase.java' + } } } diff --git a/build-tools-internal/settings.gradle b/build-tools-internal/settings.gradle index 1b4fb1215a59d..8c88d36046768 100644 --- a/build-tools-internal/settings.gradle +++ b/build-tools-internal/settings.gradle @@ -1,8 +1,5 @@ pluginManagement { repositories { - maven { - url 'https://jitpack.io' - } mavenCentral() gradlePluginPortal() } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy index b853fdef6a13f..34fa73ce502ac 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy @@ -73,7 +73,7 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest { buildFile << """ plugins { id 'elasticsearch.java-doc' - id 'com.github.johnrengelman.shadow' version '7.1.2' + id 'com.gradleup.shadow' id 'java' } group = 'org.acme.depending' diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy index 87f4bbee05780..6d080e1c80763 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy @@ -9,16 +9,10 @@ package org.elasticsearch.gradle.internal -import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.fixtures.AbstractGitAwareGradleFuncTest import org.gradle.testkit.runner.TaskOutcome -import spock.lang.IgnoreIf import spock.lang.Unroll -/* - * Test is ignored on ARM since this test case tests the ability to build certain older BWC branches that we don't support on ARM - */ -@IgnoreIf({ Architecture.current() == Architecture.AARCH64 }) class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleFuncTest { def setup() { diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy index 6e403c85a23f4..c7e11ba96c7dd 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy @@ -96,7 +96,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { plugins { id 'elasticsearch.java' id 'elasticsearch.publish' - id 'com.github.johnrengelman.shadow' + id 'com.gradleup.shadow' } repositories { @@ -117,7 +117,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } version = "1.0" group = 'org.acme' - description = 'some description' + description = 'shadowed project' """ when: @@ -137,7 +137,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { hello-world 1.0 hello-world - some 
description + shadowed project unknown unknown @@ -186,7 +186,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { plugins { id 'elasticsearch.java' id 'elasticsearch.publish' - id 'com.github.johnrengelman.shadow' + id 'com.gradleup.shadow' } dependencies { @@ -206,7 +206,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { group = 'org.acme' } - description = 'some description' + description = 'with shadowed dependencies' """ when: @@ -226,7 +226,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { hello-world 1.0 hello-world - some description + with shadowed dependencies unknown unknown @@ -277,13 +277,13 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { plugins { id 'elasticsearch.internal-es-plugin' id 'elasticsearch.publish' - id 'com.github.johnrengelman.shadow' + id 'com.gradleup.shadow' } esplugin { name = 'hello-world-plugin' classname 'org.acme.HelloWorldPlugin' - description = "custom project description" + description = "shadowed es plugin" } publishing { @@ -324,7 +324,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { hello-world-plugin 1.0 hello-world - custom project description + shadowed es plugin unknown unknown @@ -353,7 +353,6 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { https://www.elastic.co - """ ) } @@ -440,8 +439,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { // scm info only added for internal builds internalBuild() buildFile << """ - BuildParams.init { it.setGitOrigin("https://some-repo.com/repo.git") } - + buildParams.getGitOriginProperty().set("https://some-repo.com/repo.git") apply plugin:'elasticsearch.java' apply plugin:'elasticsearch.publish' diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePluginFuncTest.groovy index 354100a9b82c5..725f117d17e64 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePluginFuncTest.groovy @@ -161,7 +161,7 @@ class SnykDependencyMonitoringGradlePluginFuncTest extends AbstractGradleInterna }, "target": { "remoteUrl": "http://acme.org", - "branch": "unknown" + "branch": "$version" }, "targetReference": "$version", "projectAttributes": { diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy index 97f03d9821117..ce5c1519fe11f 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy @@ -10,6 +10,7 @@ package org.elasticsearch.gradle.internal.test.rest import spock.lang.IgnoreIf +import spock.lang.IgnoreRest import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.fixtures.AbstractRestResourcesFuncTest @@ -20,16 +21,16 @@ import org.gradle.testkit.runner.TaskOutcome class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { def setup() { + 
configurationCacheCompatible = true buildApiRestrictionsDisabled = true } def "yamlRestTest does nothing when there are no tests"() { given: + internalBuild() buildFile << """ - plugins { - id 'elasticsearch.legacy-yaml-rest-test' - } + apply plugin: 'elasticsearch.legacy-yaml-rest-test' """ when: @@ -136,7 +137,7 @@ class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { """ when: - def result = gradleRunner("yamlRestTest", "--console", 'plain', '--stacktrace').buildAndFail() + def result = gradleRunner("yamlRestTest", "--console", 'plain').buildAndFail() then: result.task(":distribution:archives:integ-test-zip:buildExpanded").outcome == TaskOutcome.SUCCESS diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index 592e6af41ab00..847eda7a355c0 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -12,11 +12,14 @@ import java.time.LocalDateTime; import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.OS -import org.elasticsearch.gradle.internal.info.BuildParams +import static org.elasticsearch.gradle.internal.util.CiUtils.safeName import java.lang.management.ManagementFactory import java.time.LocalDateTime +// Resolving this early to avoid issues with the build scan plugin in combination with the configuration cache usage +def taskNames = gradle.startParameter.taskNames.join(' ') + develocity { buildScan { @@ -34,12 +37,15 @@ develocity { publishing.onlyIf { false } } + def fips = buildParams.inFipsJvm + def gitRevision = buildParams.gitRevision + background { tag OS.current().name() tag Architecture.current().name() // Tag if this build is run in FIPS mode - if (BuildParams.inFipsJvm) { + if (fips) { tag 'FIPS' } @@ -92,8 +98,8 @@ develocity { link 'Source', "${prBaseUrl}/tree/${System.getenv('BUILDKITE_COMMIT')}" link 'Pull Request', "https://github.com/${repository}/pull/${prId}" } else { - value 'Git Commit ID', BuildParams.gitRevision - link 'Source', "https://github.com/${repository}/tree/${BuildParams.gitRevision}" + value 'Git Commit ID', gitRevision + link 'Source', "https://github.com/${repository}/tree/${gitRevision}" } buildFinished { result -> @@ -108,7 +114,7 @@ develocity { // Add a build annotation // See: https://buildkite.com/docs/agent/v3/cli-annotate - def body = """
${System.getenv('BUILDKITE_LABEL')} :gradle: ${result.failures ? 'failed' : 'successful'} build: gradle ${gradle.startParameter.taskNames.join(' ')}
""" + def body = """
${System.getenv('BUILDKITE_LABEL')} :gradle: ${result.failures ? 'failed' : 'successful'} build: gradle ${taskNames}
""" def process = [ 'buildkite-agent', 'annotate', @@ -129,7 +135,3 @@ develocity { } } } - -static def safeName(String string) { - return string.replaceAll(/[^a-zA-Z0-9_\-\.]+/, ' ').trim().replaceAll(' ', '_').toLowerCase() -} diff --git a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle index 9a988292b5b8c..77e509ea97870 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle @@ -9,7 +9,6 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -19,7 +18,7 @@ ext.bwcTaskName = { Version version -> def bwcTestSnapshots = tasks.register("bwcTestSnapshots") { if (project.bwc_tests_enabled) { - dependsOn tasks.matching { task -> BuildParams.bwcVersions.unreleased.any { version -> bwcTaskName(version) == task.name } } + dependsOn tasks.matching { task -> buildParams.bwcVersions.unreleased.any { version -> bwcTaskName(version) == task.name } } } } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle index 3bff30d9511fb..567812c740817 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle @@ -15,11 +15,12 @@ import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask import org.elasticsearch.gradle.testclusters.TestClustersAware import org.elasticsearch.gradle.testclusters.TestDistribution -// Common config when running with a FIPS-140 runtime JVM -if (BuildParams.inFipsJvm) { +//apply plugin: org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin +// Common config when running with a FIPS-140 runtime JVM +if (buildParams.inFipsJvm) { allprojects { - String javaSecurityFilename = BuildParams.runtimeJavaDetails.toLowerCase().contains('oracle') ? 'fips_java_oracle.security' : 'fips_java.security' + String javaSecurityFilename = buildParams.runtimeJavaDetails.toLowerCase().contains('oracle') ? 
'fips_java_oracle.security' : 'fips_java.security' File fipsResourcesDir = new File(project.buildDir, 'fips-resources') File fipsSecurity = new File(fipsResourcesDir, javaSecurityFilename) File fipsPolicy = new File(fipsResourcesDir, 'fips_java.policy') diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 63a3cb6d86d68..5640409e0ff44 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -171,7 +171,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { idea { project { vcs = 'Git' - jdkName = BuildParams.minimumCompilerVersion.majorVersion + jdkName = buildParams.minimumCompilerVersion.majorVersion settings { delegateActions { diff --git a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle index aacc86e764d51..224e6bd4c50d1 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle @@ -10,7 +10,6 @@ import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.precommit.ThirdPartyAuditPrecommitPlugin import org.elasticsearch.gradle.internal.precommit.ThirdPartyAuditTask import org.elasticsearch.gradle.internal.test.rest.RestTestBasePlugin @@ -27,8 +26,8 @@ configure(allprojects) { JvmVendorSpec.matching(VersionProperties.bundledJdkVendor) } project.tasks.withType(Test).configureEach { Test test -> - if (BuildParams.getIsRuntimeJavaHomeSet()) { - test.executable = "${BuildParams.runtimeJavaHome}/bin/java" + + if (buildParams.getIsRuntimeJavaHomeSet()) { + test.executable = "${buildParams.runtimeJavaHome.get()}/bin/java" + (OS.current() == OS.WINDOWS ? 
'.exe' : '') } else { test.javaLauncher = javaToolchains.launcherFor { @@ -41,7 +40,7 @@ configure(allprojects) { } project.plugins.withId("elasticsearch.testclusters") { testClustersPlugin -> project.plugins.withId("elasticsearch.internal-testclusters") { internalPlugin -> - if (BuildParams.getIsRuntimeJavaHomeSet() == false) { + if (buildParams.getIsRuntimeJavaHomeSet() == false) { // If no runtime java home is set, use the bundled JDK for test clusters testClustersPlugin.setRuntimeJava(launcher.map { it.metadata.installationPath.asFile }) } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle index 1fab4d035177a..27b490329f8cb 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle @@ -17,11 +17,11 @@ dependencies { newJar project(":libs:${project.name}") } -BuildParams.bwcVersions.withIndexCompatible({ it.onOrAfter(Version.fromString(ext.stableApiSince)) +buildParams.bwcVersions.withIndexCompatible({ it.onOrAfter(Version.fromString(ext.stableApiSince)) && it != VersionProperties.elasticsearchVersion }) { bwcVersion, baseName -> - BwcVersions.UnreleasedVersionInfo unreleasedVersion = BuildParams.bwcVersions.unreleasedInfo(bwcVersion) + BwcVersions.UnreleasedVersionInfo unreleasedVersion = buildParams.bwcVersions.unreleasedInfo(bwcVersion) configurations { "oldJar${baseName}" { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java index 04f031d4a5169..49887dac5b6fd 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java @@ -12,7 +12,7 @@ import groovy.lang.Closure; import org.elasticsearch.gradle.internal.conventions.util.Util; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.precommit.JarHellPrecommitPlugin; import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.elasticsearch.gradle.plugin.PluginBuildPlugin; @@ -39,6 +39,7 @@ public void apply(Project project) { project.getPluginManager().apply(JarHellPrecommitPlugin.class); project.getPluginManager().apply(ElasticsearchJavaPlugin.class); project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class); + boolean isCi = project.getRootProject().getExtensions().getByType(BuildParameterExtension.class).isCi(); // Clear default dependencies added by public PluginBuildPlugin as we add our // own project dependencies for internal builds // TODO remove once we removed default dependencies from PluginBuildPlugin @@ -54,7 +55,7 @@ public void apply(Project project) { .set("addQaCheckDependencies", new Closure(BaseInternalPluginBuildPlugin.this, BaseInternalPluginBuildPlugin.this) { public void doCall(Project proj) { // This is only a convenience for local developers so make this a noop when running in CI - if (BuildParams.isCi() == false) { + if (isCi == false) { proj.afterEvaluate(project1 -> { // let check depend on check tasks of qa sub-projects final var checkTaskProvider = project1.getTasks().named("check"); diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java index 40d16bafbb26b..d7bf839817e12 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java @@ -13,7 +13,6 @@ import org.elasticsearch.gradle.LoggedExec; import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.Project; @@ -47,6 +46,7 @@ public class BwcSetupExtension { private final ProviderFactory providerFactory; private final JavaToolchainService toolChainService; private final Provider<BwcVersions.UnreleasedVersionInfo> unreleasedVersionInfo; + private final Boolean isCi; private Provider<File> checkoutDir; @@ -56,7 +56,8 @@ public BwcSetupExtension( ProviderFactory providerFactory, JavaToolchainService toolChainService, Provider<BwcVersions.UnreleasedVersionInfo> unreleasedVersionInfo, - Provider<File> checkoutDir + Provider<File> checkoutDir, + Boolean isCi ) { this.project = project; this.objectFactory = objectFactory; @@ -64,6 +65,7 @@ public BwcSetupExtension( this.toolChainService = toolChainService; this.unreleasedVersionInfo = unreleasedVersionInfo; this.checkoutDir = checkoutDir; + this.isCi = isCi; } TaskProvider<LoggedExec> bwcTask(String name, Action<LoggedExec> configuration) { @@ -80,7 +82,8 @@ TaskProvider<LoggedExec> bwcTask(String name, Action<LoggedExec> configuration, toolChainService, name, configuration, - useUniqueUserHome + useUniqueUserHome, + isCi ); } @@ -93,7 +96,8 @@ private static TaskProvider<LoggedExec> createRunBwcGradleTask( JavaToolchainService toolChainService, String name, Action<LoggedExec> configAction, - boolean useUniqueUserHome + boolean useUniqueUserHome, + boolean isCi ) { return project.getTasks().register(name, LoggedExec.class, loggedExec -> { loggedExec.dependsOn("checkoutBwcBranch"); @@ -104,7 +108,7 @@ private static TaskProvider<LoggedExec> createRunBwcGradleTask( spec.getParameters().getCheckoutDir().set(checkoutDir); }).flatMap(s -> getJavaHome(objectFactory, toolChainService, Integer.parseInt(s)))); - if (BuildParams.isCi() && OS.current() != OS.WINDOWS) { + if (isCi && OS.current() != OS.WINDOWS) { // TODO: Disabled for now until we can figure out why files are getting corrupted // loggedExec.getEnvironment().put("GRADLE_RO_DEP_CACHE", System.getProperty("user.home") + "/gradle_ro_cache"); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java index f467a204c0348..93c2623a23d31 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; +import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -60,7 +61,8 @@ * We are then able to map the unreleased version to branches in git and Gradle projects that are capable of checking * out and building them, so we can include these in the testing plan as well.
*/ -public class BwcVersions { + +public class BwcVersions implements Serializable { private static final Pattern LINE_PATTERN = Pattern.compile( "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)?.*\\);" ); @@ -68,7 +70,7 @@ public class BwcVersions { private static final String GLIBC_VERSION_ENV_VAR = "GLIBC_VERSION"; private final Version currentVersion; - private final List<Version> versions; + private final transient List<Version> versions; private final Map<Version, UnreleasedVersionInfo> unreleased; public BwcVersions(List<String> versionLines) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index 05b7af83aa8e4..c897b142da2fb 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -11,7 +11,7 @@ import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitTaskPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.test.MutedTestPlugin; import org.elasticsearch.gradle.internal.test.TestUtil; @@ -49,6 +49,7 @@ public class ElasticsearchJavaBasePlugin implements Plugin<Project> { private final JavaToolchainService javaToolchains; + private BuildParameterExtension buildParams; @Inject ElasticsearchJavaBasePlugin(JavaToolchainService javaToolchains) { @@ -57,8 +58,10 @@ public class ElasticsearchJavaBasePlugin implements Plugin<Project> { @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); // make sure the global build info plugin is applied to the root project project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); + buildParams = project.getRootProject().getExtensions().getByType(BuildParameterExtension.class); project.getPluginManager().apply(JavaBasePlugin.class); // common repositories setup project.getPluginManager().apply(RepositoriesSetupPlugin.class); @@ -129,14 +132,14 @@ private static void disableTransitiveDependenciesForSourceSet(Project project, S public void configureCompile(Project project) { project.getExtensions().getExtraProperties().set("compactProfile", "full"); JavaPluginExtension java = project.getExtensions().getByType(JavaPluginExtension.class); - if (BuildParams.getJavaToolChainSpec().isPresent()) { - java.toolchain(BuildParams.getJavaToolChainSpec().get()); + if (buildParams.getJavaToolChainSpec().isPresent()) { + java.toolchain(buildParams.getJavaToolChainSpec().get()); } - java.setSourceCompatibility(BuildParams.getMinimumRuntimeVersion()); - java.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion()); + java.setSourceCompatibility(buildParams.getMinimumRuntimeVersion()); + java.setTargetCompatibility(buildParams.getMinimumRuntimeVersion()); project.getTasks().withType(JavaCompile.class).configureEach(compileTask -> { compileTask.getJavaCompiler().set(javaToolchains.compilerFor(spec -> { - spec.getLanguageVersion().set(JavaLanguageVersion.of(BuildParams.getMinimumRuntimeVersion().getMajorVersion())); + spec.getLanguageVersion().set(JavaLanguageVersion.of(buildParams.getMinimumRuntimeVersion().getMajorVersion()));
})); CompileOptions compileOptions = compileTask.getOptions(); @@ -159,7 +162,7 @@ public void configureCompile(Project project) { compileTask.getConventionMapping().map("sourceCompatibility", () -> java.getSourceCompatibility().toString()); compileTask.getConventionMapping().map("targetCompatibility", () -> java.getTargetCompatibility().toString()); compileOptions.getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); - compileOptions.setIncremental(BuildParams.isCi() == false); + compileOptions.setIncremental(buildParams.isCi() == false); }); // also apply release flag to groovy, which is used in build-tools project.getTasks().withType(GroovyCompile.class).configureEach(compileTask -> { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java index d064c70c72819..e62c26c7fbc01 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java @@ -15,8 +15,10 @@ import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.conventions.util.Util; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.Action; +import org.gradle.api.JavaVersion; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; @@ -24,6 +26,7 @@ import org.gradle.api.plugins.BasePlugin; import org.gradle.api.plugins.JavaLibraryPlugin; import org.gradle.api.plugins.JavaPlugin; +import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.bundling.Jar; import org.gradle.api.tasks.javadoc.Javadoc; @@ -34,6 +37,7 @@ import java.util.Map; import static org.elasticsearch.gradle.internal.conventions.util.Util.toStringable; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; /** * A wrapper around Gradle's Java plugin that applies our @@ -42,13 +46,15 @@ public class ElasticsearchJavaPlugin implements Plugin<Project> { @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + Property<BuildParameterExtension> buildParams = loadBuildParams(project); project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); project.getPluginManager().apply(JavaLibraryPlugin.class); project.getPluginManager().apply(ElasticsearchJavaModulePathPlugin.class); // configureConfigurations(project); - configureJars(project); - configureJarManifest(project); + configureJars(project, buildParams.get()); + configureJarManifest(project, buildParams.get()); configureJavadoc(project); testCompileOnlyDeps(project); } @@ -63,7 +69,9 @@ private static void testCompileOnlyDeps(Project project) { /** * Adds additional manifest info to jars */ - static void configureJars(Project project) { + static void configureJars(Project project, BuildParameterExtension buildParams) { + String buildDate = buildParams.getBuildDate().toString(); + JavaVersion gradleJavaVersion = buildParams.getGradleJavaVersion(); project.getTasks().withType(Jar.class).configureEach(jarTask -> { // we put all our distributable files under distributions jarTask.getDestinationDirectory().set(new File(project.getBuildDir(),
"distributions")); @@ -75,14 +83,11 @@ static void configureJars(Project project) { public void execute(Task task) { // this doFirst is added before the info plugin, therefore it will run // after the doFirst added by the info plugin, and we can override attributes - jarTask.getManifest() - .attributes( - Map.of("Build-Date", BuildParams.getBuildDate(), "Build-Java-Version", BuildParams.getGradleJavaVersion()) - ); + jarTask.getManifest().attributes(Map.of("Build-Date", buildDate, "Build-Java-Version", gradleJavaVersion)); } }); }); - project.getPluginManager().withPlugin("com.github.johnrengelman.shadow", p -> { + project.getPluginManager().withPlugin("com.gradleup.shadow", p -> { project.getTasks().withType(ShadowJar.class).configureEach(shadowJar -> { /* * Replace the default "-all" classifier with null @@ -102,10 +107,13 @@ public void execute(Task task) { }); } - private static void configureJarManifest(Project project) { + private static void configureJarManifest(Project project, BuildParameterExtension buildParams) { + String gitOrigin = buildParams.getGitOrigin(); + String gitRevision = buildParams.getGitRevision(); + project.getPlugins().withType(InfoBrokerPlugin.class).whenPluginAdded(manifestPlugin -> { - manifestPlugin.add("Module-Origin", toStringable(BuildParams::getGitOrigin)); - manifestPlugin.add("Change", toStringable(BuildParams::getGitRevision)); + manifestPlugin.add("Module-Origin", toStringable(() -> gitOrigin)); + manifestPlugin.add("Change", toStringable(() -> gitRevision)); manifestPlugin.add("X-Compile-Elasticsearch-Version", toStringable(VersionProperties::getElasticsearch)); manifestPlugin.add("X-Compile-Lucene-Version", toStringable(VersionProperties::getLucene)); manifestPlugin.add( diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 7a831fbcc1464..4446952fec2bb 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -13,7 +13,7 @@ import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.internal.conventions.util.Util; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.test.ErrorReportingTestListener; import org.elasticsearch.gradle.internal.test.SimpleCommandLineArgumentProvider; @@ -26,6 +26,7 @@ import org.gradle.api.artifacts.Configuration; import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPlugin; +import org.gradle.api.provider.Property; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; @@ -37,6 +38,7 @@ import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; import static org.elasticsearch.gradle.util.FileUtils.mkdirs; import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure; @@ -52,6 +54,9 @@ public abstract class ElasticsearchTestBasePlugin implements Plugin { @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + Property buildParams = loadBuildParams(project); + 
project.getPluginManager().apply(GradleTestPolicySetupPlugin.class); // for fips mode check project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); @@ -100,7 +105,7 @@ public void execute(Task t) { test.getExtensions().add("nonInputProperties", nonInputProperties); test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName().replace("#", "_"))); - test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", BuildParams.getDefaultParallel().toString()))); + test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", buildParams.get().getDefaultParallel().toString()))); test.exclude("**/*$*.class"); @@ -146,9 +151,9 @@ public void execute(Task t) { // ignore changing test seed when build is passed -Dignore.tests.seed for cacheability experimentation if (System.getProperty("ignore.tests.seed") != null) { - nonInputProperties.systemProperty("tests.seed", BuildParams.getTestSeed()); + nonInputProperties.systemProperty("tests.seed", buildParams.get().getTestSeed()); } else { - test.systemProperty("tests.seed", BuildParams.getTestSeed()); + test.systemProperty("tests.seed", buildParams.get().getTestSeed()); } // don't track these as inputs since they contain absolute paths and break cache relocatability @@ -193,7 +198,7 @@ public void execute(Task t) { * If this project builds a shadow JAR than any unit tests should test against that artifact instead of * compiled class output and dependency jars. This better emulates the runtime environment of consumers. */ - project.getPluginManager().withPlugin("com.github.johnrengelman.shadow", p -> { + project.getPluginManager().withPlugin("com.gradleup.shadow", p -> { if (test.getName().equals(JavaPlugin.TEST_TASK_NAME)) { // Remove output class files and any other dependencies from the test classpath, since the shadow JAR includes these SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index fcf286ed471dd..80fd6db59cf9f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -10,7 +10,7 @@ package org.elasticsearch.gradle.internal; import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.Action; import org.gradle.api.InvalidUserDataException; @@ -39,6 +39,7 @@ import static java.util.Arrays.asList; import static java.util.Arrays.stream; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; /** * We want to be able to do BWC tests for unreleased versions without relying on and waiting for snapshots. 
@@ -64,23 +65,29 @@ public void apply(Project project) { project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); project.getPlugins().apply(JvmToolchainsPlugin.class); toolChainService = project.getExtensions().getByType(JavaToolchainService.class); - BuildParams.getBwcVersions().forPreviousUnreleased((BwcVersions.UnreleasedVersionInfo unreleasedVersion) -> { + BuildParameterExtension buildParams = loadBuildParams(project).get(); + Boolean isCi = buildParams.isCi(); + buildParams.getBwcVersions().forPreviousUnreleased((BwcVersions.UnreleasedVersionInfo unreleasedVersion) -> { configureBwcProject( project.project(unreleasedVersion.gradleProjectPath()), + buildParams, unreleasedVersion, providerFactory, objectFactory, - toolChainService + toolChainService, + isCi ); }); } private static void configureBwcProject( Project project, + BuildParameterExtension buildParams, BwcVersions.UnreleasedVersionInfo versionInfo, ProviderFactory providerFactory, ObjectFactory objectFactory, - JavaToolchainService toolChainService + JavaToolchainService toolChainService, + Boolean isCi ) { ProjectLayout layout = project.getLayout(); Provider<BwcVersions.UnreleasedVersionInfo> versionInfoProvider = providerFactory.provider(() -> versionInfo); @@ -96,7 +103,8 @@ private static void configureBwcProject( providerFactory, toolChainService, versionInfoProvider, - checkoutDir + checkoutDir, + isCi ); BwcGitExtension gitExtension = project.getPlugins().apply(InternalBwcGitPlugin.class).getGitExtension(); Provider<Version> bwcVersion = versionInfoProvider.map(info -> info.version()); @@ -122,6 +130,7 @@ private static void configureBwcProject( for (DistributionProject distributionProject : distributionProjects) { createBuildBwcTask( bwcSetupExtension, + buildParams, project, bwcVersion, distributionProject.name, @@ -144,6 +153,7 @@ private static void configureBwcProject( createBuildBwcTask( bwcSetupExtension, + buildParams, project, bwcVersion, "jdbc", @@ -177,6 +187,7 @@ private static void configureBwcProject( createBuildBwcTask( bwcSetupExtension, + buildParams, project, bwcVersion, stableApiProject.getName(), @@ -296,6 +307,7 @@ public static String buildBwcTaskName(String projectName) { static void createBuildBwcTask( BwcSetupExtension bwcSetupExtension, + BuildParameterExtension buildParams, Project project, Provider<Version> bwcVersion, String projectName, @@ -316,7 +328,7 @@ static void createBuildBwcTask( } else { c.getOutputs().files(expectedOutputFile); } - c.getOutputs().doNotCacheIf("BWC distribution caching is disabled for local builds", task -> BuildParams.isCi() == false); + c.getOutputs().doNotCacheIf("BWC distribution caching is disabled for local builds", task -> buildParams.isCi() == false); c.getArgs().add("-p"); c.getArgs().add(projectPath); c.getArgs().add(assembleTaskName); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index 0bf4bcb33c23b..60699522cdc3f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -20,7 +20,7 @@ import org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes; import org.elasticsearch.gradle.internal.docker.DockerSupportPlugin; import org.elasticsearch.gradle.internal.docker.DockerSupportService;
-import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.GradleException; @@ -35,6 +35,8 @@ import java.util.Map; import java.util.function.Function; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + /** * An internal elasticsearch build plugin that registers additional * distribution resolution strategies to the 'elasticsearch.download-distribution' plugin @@ -47,6 +49,8 @@ public void apply(Project project) { // this is needed for isInternal project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); project.getRootProject().getPluginManager().apply(DockerSupportPlugin.class); + BuildParameterExtension buildParams = loadBuildParams(project).get(); + DistributionDownloadPlugin distributionDownloadPlugin = project.getPlugins().apply(DistributionDownloadPlugin.class); Provider<DockerSupportService> dockerSupport = GradleUtils.getBuildService( project.getGradle().getSharedServices(), @@ -55,7 +59,10 @@ public void apply(Project project) { distributionDownloadPlugin.setDockerAvailability( dockerSupport.map(dockerSupportService -> dockerSupportService.getDockerAvailability().isAvailable()) ); - registerInternalDistributionResolutions(DistributionDownloadPlugin.getRegistrationsContainer(project)); + registerInternalDistributionResolutions( + DistributionDownloadPlugin.getRegistrationsContainer(project), + buildParams.getBwcVersionsProperty() + ); } /** @@ -66,7 +73,7 @@ public void apply(Project project) { *
<p>
* BWC versions are resolved as project to projects under `:distribution:bwc`. */ - private void registerInternalDistributionResolutions(List<DistributionResolution> resolutions) { + private void registerInternalDistributionResolutions(List<DistributionResolution> resolutions, Provider<BwcVersions> bwcVersions) { resolutions.add(new DistributionResolution("local-build", (project, distribution) -> { if (isCurrentVersion(distribution)) { // non-external project, so depend on local build @@ -78,7 +85,7 @@ private void registerInternalDistributionResolutions(List<DistributionResolution> resolutions.add(new DistributionResolution("bwc", (project, distribution) -> { - BwcVersions.UnreleasedVersionInfo unreleasedInfo = BuildParams.getBwcVersions() + BwcVersions.UnreleasedVersionInfo unreleasedInfo = bwcVersions.get() .unreleasedInfo(Version.fromString(distribution.getVersion())); if (unreleasedInfo != null) { if (distribution.getBundledJdk() == false) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java index c7ab83ff7829a..7e7ffad12a9a5 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java @@ -10,34 +10,29 @@ package org.elasticsearch.gradle.internal; import org.elasticsearch.gradle.VersionProperties; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; import org.elasticsearch.gradle.testclusters.TestClustersPlugin; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; -import org.gradle.api.provider.ProviderFactory; -import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; public class InternalTestClustersPlugin implements Plugin<Project> { - private ProviderFactory providerFactory; - - @Inject - public InternalTestClustersPlugin(ProviderFactory providerFactory) { - this.providerFactory = providerFactory; - } - @Override public void apply(Project project) { project.getPlugins().apply(InternalDistributionDownloadPlugin.class); + project.getRootProject().getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + BuildParameterExtension buildParams = loadBuildParams(project).get(); project.getRootProject().getPluginManager().apply(InternalReaperPlugin.class); TestClustersPlugin testClustersPlugin = project.getPlugins().apply(TestClustersPlugin.class); - testClustersPlugin.setRuntimeJava(providerFactory.provider(() -> BuildParams.getRuntimeJavaHome())); + testClustersPlugin.setRuntimeJava(buildParams.getRuntimeJavaHome()); testClustersPlugin.setIsReleasedVersion( - version -> (version.equals(VersionProperties.getElasticsearchVersion()) && BuildParams.isSnapshotBuild() == false) - || BuildParams.getBwcVersions().unreleasedInfo(version) == null + version -> (version.equals(VersionProperties.getElasticsearchVersion()) && buildParams.isSnapshotBuild() == false) + || buildParams.getBwcVersions().unreleasedInfo(version) == null ); if (shouldConfigureTestClustersWithOneProcessor()) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java index f988208ab4fec..d1585120b0803 100644 ---
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java @@ -9,7 +9,8 @@ package org.elasticsearch.gradle.internal; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.JavaVersion; @@ -47,6 +48,7 @@ import javax.inject.Inject; import static de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin.FORBIDDEN_APIS_TASK_NAME; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; import static org.objectweb.asm.Opcodes.V_PREVIEW; public class MrjarPlugin implements Plugin { @@ -64,6 +66,8 @@ public class MrjarPlugin implements Plugin { @Override public void apply(Project project) { project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); var javaExtension = project.getExtensions().getByType(JavaPluginExtension.class); var isIdeaSync = System.getProperty("idea.sync.active", "false").equals("true"); var ideaSourceSetsEnabled = project.hasProperty(MRJAR_IDEA_ENABLED) && project.property(MRJAR_IDEA_ENABLED).equals("true"); @@ -89,7 +93,7 @@ public void apply(Project project) { String testSourceSetName = SourceSet.TEST_SOURCE_SET_NAME + javaVersion; SourceSet testSourceSet = addSourceSet(project, javaExtension, testSourceSetName, testSourceSets, javaVersion); testSourceSets.add(testSourceSetName); - createTestTask(project, testSourceSet, javaVersion, mainSourceSets); + createTestTask(project, buildParams, testSourceSet, javaVersion, mainSourceSets); } } @@ -163,7 +167,13 @@ private void configureSourceSetInJar(Project project, SourceSet sourceSet, int j jarTask.configure(task -> task.into("META-INF/versions/" + javaVersion, copySpec -> copySpec.from(sourceSet.getOutput()))); } - private void createTestTask(Project project, SourceSet sourceSet, int javaVersion, List mainSourceSets) { + private void createTestTask( + Project project, + BuildParameterExtension buildParams, + SourceSet sourceSet, + int javaVersion, + List mainSourceSets + ) { var jarTask = project.getTasks().withType(Jar.class).named(JavaPlugin.JAR_TASK_NAME); var testTaskProvider = project.getTasks().register(JavaPlugin.TEST_TASK_NAME + javaVersion, Test.class); testTaskProvider.configure(testTask -> { @@ -180,9 +190,9 @@ private void createTestTask(Project project, SourceSet sourceSet, int javaVersio // only set the jdk if runtime java isn't set because setting the toolchain is incompatible with // runtime java setting the executable directly - if (BuildParams.getIsRuntimeJavaHomeSet()) { + if (buildParams.getIsRuntimeJavaHomeSet()) { testTask.onlyIf("runtime java must support java " + javaVersion, t -> { - JavaVersion runtimeJavaVersion = BuildParams.getRuntimeJavaVersion(); + JavaVersion runtimeJavaVersion = buildParams.getRuntimeJavaVersion().get(); return runtimeJavaVersion.isCompatibleWith(JavaVersion.toVersion(javaVersion)); }); } else { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportPlugin.java 
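The InternalTestClustersPlugin and MrjarPlugin hunks above repeat one consumer-side pattern: apply GlobalBuildInfoPlugin to the root project, fetch the shared BuildParameterExtension once in apply(), and pass values down as arguments instead of reading static BuildParams state from helpers. A minimal sketch of that shape, not part of the patch (ExamplePlugin and configureTests are hypothetical; the imports match the diff):

    import org.elasticsearch.gradle.internal.info.BuildParameterExtension;
    import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin;
    import org.gradle.api.Plugin;
    import org.gradle.api.Project;

    import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams;

    public class ExamplePlugin implements Plugin<Project> {
        @Override
        public void apply(Project project) {
            // GlobalBuildInfoPlugin registers the "buildParams" extension and shared service on the root project.
            project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class);
            BuildParameterExtension buildParams = loadBuildParams(project).get();
            // Thread values through explicitly so helpers stay free of global state.
            configureTests(project, buildParams.getIsRuntimeJavaHomeSet());
        }

        private void configureTests(Project project, boolean runtimeJavaHomeSet) {
            // ...wiring that previously called BuildParams.getIsRuntimeJavaHomeSet()...
        }
    }
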
index 84a6432041ed1..7348181c4199c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportPlugin.java @@ -8,6 +8,7 @@ */ package org.elasticsearch.gradle.internal.docker; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; @@ -17,6 +18,8 @@ import java.util.List; import java.util.stream.Collectors; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + /** * Plugin providing {@link DockerSupportService} for detecting Docker installations and determining requirements for Docker-based * Elasticsearch build tasks. @@ -30,11 +33,14 @@ public void apply(Project project) { if (project != project.getRootProject()) { throw new IllegalStateException(this.getClass().getName() + " can only be applied to the root project."); } + project.getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); Provider dockerSupportServiceProvider = project.getGradle() .getSharedServices() .registerIfAbsent(DOCKER_SUPPORT_SERVICE_NAME, DockerSupportService.class, spec -> spec.parameters(params -> { params.setExclusionsFile(new File(project.getRootDir(), DOCKER_ON_LINUX_EXCLUSIONS_FILE)); + params.getIsCI().set(buildParams.isCi()); })); // Ensure that if we are trying to run any DockerBuildTask tasks, we assert an available Docker installation exists diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java index 344a477e74ef9..f40f5d932b701 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java @@ -13,10 +13,10 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.GradleException; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; +import org.gradle.api.provider.Property; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.services.BuildService; import org.gradle.api.services.BuildServiceParameters; @@ -59,7 +59,6 @@ public abstract class DockerSupportService implements BuildService serviceInfos; private Map> tcpPorts; private Map> udpPorts; @@ -228,7 +227,7 @@ private boolean isExcludedOs() { // We don't attempt to check the current flavor and version of Linux unless we're // running in CI, because we don't want to stop people running the Docker tests in // their own environments if they really want to. - if (BuildParams.isCi() == false) { + if (getParameters().getIsCI().get().booleanValue() == false) { return false; } @@ -356,10 +355,6 @@ public Map> getUdpPorts() { return udpPorts; } - public void setServiceInfos(Map serviceInfos) { - this.serviceInfos = serviceInfos; - } - /** * An immutable class that represents the results of a Docker search from {@link #getDockerAvailability()}}. 
     */
@@ -402,5 +397,7 @@ interface Parameters extends BuildServiceParameters {
         File getExclusionsFile();
 
         void setExclusionsFile(File exclusionsFile);
+
+        Property<Boolean> getIsCI();
     }
 }
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java
new file mode 100644
index 0000000000000..5531194e0abde
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java
@@ -0,0 +1,187 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.gradle.internal.info;
+
+import org.elasticsearch.gradle.internal.BwcVersions;
+import org.gradle.api.Action;
+import org.gradle.api.JavaVersion;
+import org.gradle.api.Task;
+import org.gradle.api.provider.Property;
+import org.gradle.api.provider.Provider;
+import org.gradle.api.provider.ProviderFactory;
+import org.gradle.jvm.toolchain.JavaToolchainSpec;
+
+import java.io.File;
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicReference;
+
+public abstract class BuildParameterExtension {
+    private final Provider<Boolean> inFipsJvm;
+    private final Provider<File> runtimeJavaHome;
+    private final Boolean isRuntimeJavaHomeSet;
+    private final List javaVersions;
+    private final JavaVersion minimumCompilerVersion;
+    private final JavaVersion minimumRuntimeVersion;
+    private final JavaVersion gradleJavaVersion;
+    private final Provider<JavaVersion> runtimeJavaVersion;
+    private final Provider<Action<JavaToolchainSpec>> javaToolChainSpec;
+    private final Provider<String> runtimeJavaDetails;
+    private final String gitRevision;
+    private transient AtomicReference<ZonedDateTime> buildDate = new AtomicReference<>();
+    private final String testSeed;
+    private final Boolean isCi;
+    private final Integer defaultParallel;
+    private final Boolean isSnapshotBuild;
+
+    public BuildParameterExtension(
+        ProviderFactory providers,
+        Provider<File> runtimeJavaHome,
+        Provider<Action<JavaToolchainSpec>> javaToolChainSpec,
+        Provider<JavaVersion> runtimeJavaVersion,
+        boolean isRuntimeJavaHomeSet,
+        Provider<String> runtimeJavaDetails,
+        List javaVersions,
+        JavaVersion minimumCompilerVersion,
+        JavaVersion minimumRuntimeVersion,
+        JavaVersion gradleJavaVersion,
+        String gitRevision,
+        String gitOrigin,
+        ZonedDateTime buildDate,
+        String testSeed,
+        boolean isCi,
+        int defaultParallel,
+        final boolean isSnapshotBuild,
+        Provider<BwcVersions> bwcVersions
+    ) {
+        this.inFipsJvm = providers.systemProperty("tests.fips.enabled").map(BuildParameterExtension::parseBoolean);
+        this.runtimeJavaHome = runtimeJavaHome;
+        this.javaToolChainSpec = javaToolChainSpec;
+        this.runtimeJavaVersion = runtimeJavaVersion;
+        this.isRuntimeJavaHomeSet = isRuntimeJavaHomeSet;
+        this.runtimeJavaDetails = runtimeJavaDetails;
+        this.javaVersions = javaVersions;
+        this.minimumCompilerVersion = minimumCompilerVersion;
+        this.minimumRuntimeVersion = minimumRuntimeVersion;
+        this.gradleJavaVersion = gradleJavaVersion;
+        this.gitRevision = gitRevision;
+        this.testSeed = testSeed;
+        this.isCi = isCi;
+        this.defaultParallel = defaultParallel;
+        this.isSnapshotBuild = isSnapshotBuild;
+        this.getBwcVersionsProperty().set(bwcVersions);
+        this.getGitOriginProperty().set(gitOrigin);
+    }
+
+    private static boolean parseBoolean(String s) {
+        if (s == null) {
+            return false;
+        }
+        return Boolean.parseBoolean(s);
+    }
+
+    public boolean getInFipsJvm() {
+        return inFipsJvm.getOrElse(false);
+    }
+
+    public Provider<File> getRuntimeJavaHome() {
+        return runtimeJavaHome;
+    }
+
+    public void withFipsEnabledOnly(Task task) {
+        task.onlyIf("FIPS mode disabled", task1 -> getInFipsJvm() == false);
+    }
+
+    public Boolean getIsRuntimeJavaHomeSet() {
+        return isRuntimeJavaHomeSet;
+    }
+
+    public List getJavaVersions() {
+        return javaVersions;
+    }
+
+    public JavaVersion getMinimumCompilerVersion() {
+        return minimumCompilerVersion;
+    }
+
+    public JavaVersion getMinimumRuntimeVersion() {
+        return minimumRuntimeVersion;
+    }
+
+    public JavaVersion getGradleJavaVersion() {
+        return gradleJavaVersion;
+    }
+
+    public Provider<JavaVersion> getRuntimeJavaVersion() {
+        return runtimeJavaVersion;
+    }
+
+    public Provider<Action<JavaToolchainSpec>> getJavaToolChainSpec() {
+        return javaToolChainSpec;
+    }
+
+    public Provider<String> getRuntimeJavaDetails() {
+        return runtimeJavaDetails;
+    }
+
+    public String getGitRevision() {
+        return gitRevision;
+    }
+
+    public String getGitOrigin() {
+        return getGitOriginProperty().get();
+    }
+
+    public ZonedDateTime getBuildDate() {
+        ZonedDateTime value = buildDate.get();
+        if (value == null) {
+            value = ZonedDateTime.now(ZoneOffset.UTC);
+            if (buildDate.compareAndSet(null, value) == false) {
+                // If another thread initialized it first, return the initialized value
+                value = buildDate.get();
+            }
+        }
+        return value;
+    }
+
+    public String getTestSeed() {
+        return testSeed;
+    }
+
+    public Boolean isCi() {
+        return isCi;
+    }
+
+    public Integer getDefaultParallel() {
+        return defaultParallel;
+    }
+
+    public Boolean isSnapshotBuild() {
+        return isSnapshotBuild;
+    }
+
+    public BwcVersions getBwcVersions() {
+        return getBwcVersionsProperty().get();
+    }
+
+    public abstract Property<BwcVersions> getBwcVersionsProperty();
+
+    public abstract Property<String> getGitOriginProperty();
+
+    public Random getRandom() {
+        return new Random(Long.parseUnsignedLong(testSeed.split(":")[0], 16));
+    }
+
+    public Boolean isGraalVmRuntime() {
+        return runtimeJavaDetails.get().toLowerCase().contains("graalvm");
+    }
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterService.java
new file mode 100644
index 0000000000000..ec1bc4aec1324
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterService.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */ + +package org.elasticsearch.gradle.internal.info; + +import org.gradle.api.provider.Property; +import org.gradle.api.services.BuildService; +import org.gradle.api.services.BuildServiceParameters; + +public abstract class BuildParameterService implements BuildService, AutoCloseable { + public interface Params extends BuildServiceParameters { + Property getBuildParams(); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java index d3afeed9f8578..ea8aeda8fc099 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java @@ -8,43 +8,13 @@ */ package org.elasticsearch.gradle.internal.info; -import org.elasticsearch.gradle.internal.BwcVersions; -import org.gradle.api.Action; -import org.gradle.api.JavaVersion; -import org.gradle.api.Task; -import org.gradle.api.provider.Provider; -import org.gradle.jvm.toolchain.JavaToolchainSpec; - -import java.io.File; -import java.io.IOException; import java.lang.reflect.Modifier; -import java.time.ZonedDateTime; import java.util.Arrays; -import java.util.List; -import java.util.Random; import java.util.function.Consumer; -import static java.util.Objects.requireNonNull; - +@Deprecated public class BuildParams { - private static Provider runtimeJavaHome; - private static Boolean isRuntimeJavaHomeSet; - private static List javaVersions; - private static JavaVersion minimumCompilerVersion; - private static JavaVersion minimumRuntimeVersion; - private static JavaVersion gradleJavaVersion; - private static Provider runtimeJavaVersion; - private static Provider> javaToolChainSpec; - private static Provider runtimeJavaDetails; - private static Boolean inFipsJvm; - private static String gitRevision; - private static String gitOrigin; - private static ZonedDateTime buildDate; - private static String testSeed; private static Boolean isCi; - private static Integer defaultParallel; - private static Boolean isSnapshotBuild; - private static Provider bwcVersions; /** * Initialize global build parameters. 
This method accepts an initialization function which in turn accepts a
@@ -58,90 +28,10 @@ public static void init(Consumer<MutableBuildParams> initializer) {
         initializer.accept(MutableBuildParams.INSTANCE);
     }
 
-    public static File getRuntimeJavaHome() {
-        return value(runtimeJavaHome).get();
-    }
-
-    public static Boolean getIsRuntimeJavaHomeSet() {
-        return value(isRuntimeJavaHomeSet);
-    }
-
-    public static List getJavaVersions() {
-        return value(javaVersions);
-    }
-
-    public static JavaVersion getMinimumCompilerVersion() {
-        return value(minimumCompilerVersion);
-    }
-
-    public static JavaVersion getMinimumRuntimeVersion() {
-        return value(minimumRuntimeVersion);
-    }
-
-    public static JavaVersion getGradleJavaVersion() {
-        return value(gradleJavaVersion);
-    }
-
-    public static JavaVersion getRuntimeJavaVersion() {
-        return value(runtimeJavaVersion.get());
-    }
-
-    public static String getRuntimeJavaDetails() {
-        return value(runtimeJavaDetails.get());
-    }
-
-    public static Boolean isInFipsJvm() {
-        return value(inFipsJvm);
-    }
-
-    public static void withFipsEnabledOnly(Task task) {
-        task.onlyIf("FIPS mode disabled", task1 -> isInFipsJvm() == false);
-    }
-
-    public static String getGitRevision() {
-        return value(gitRevision);
-    }
-
-    public static String getGitOrigin() {
-        return value(gitOrigin);
-    }
-
-    public static ZonedDateTime getBuildDate() {
-        return value(buildDate);
-    }
-
-    public static BwcVersions getBwcVersions() {
-        return value(bwcVersions).get();
-    }
-
-    public static String getTestSeed() {
-        return value(testSeed);
-    }
-
-    public static Random getRandom() {
-        return new Random(Long.parseUnsignedLong(testSeed.split(":")[0], 16));
-    }
-
     public static Boolean isCi() {
         return value(isCi);
     }
 
-    public static Boolean isGraalVmRuntime() {
-        return value(runtimeJavaDetails.get().toLowerCase().contains("graalvm"));
-    }
-
-    public static Integer getDefaultParallel() {
-        return value(defaultParallel);
-    }
-
-    public static boolean isSnapshotBuild() {
-        return value(BuildParams.isSnapshotBuild);
-    }
-
-    public static Provider<Action<JavaToolchainSpec>> getJavaToolChainSpec() {
-        return javaToolChainSpec;
-    }
-
     private static <T> T value(T object) {
         if (object == null) {
             String callingMethod = Thread.currentThread().getStackTrace()[2].getMethodName();
@@ -183,82 +73,8 @@ public void reset() {
             });
         }
 
-        public void setRuntimeJavaHome(Provider<File> runtimeJavaHome) {
-            BuildParams.runtimeJavaHome = runtimeJavaHome.map(javaHome -> {
-                try {
-                    return javaHome.getCanonicalFile();
-                } catch (IOException e) {
-                    throw new RuntimeException(e);
-                }
-            });
-        }
-
-        public void setIsRuntimeJavaHomeSet(boolean isRuntimeJavaHomeSet) {
-            BuildParams.isRuntimeJavaHomeSet = isRuntimeJavaHomeSet;
-        }
-
-        public void setJavaVersions(List javaVersions) {
-            BuildParams.javaVersions = requireNonNull(javaVersions);
-        }
-
-        public void setMinimumCompilerVersion(JavaVersion minimumCompilerVersion) {
-            BuildParams.minimumCompilerVersion = requireNonNull(minimumCompilerVersion);
-        }
-
-        public void setMinimumRuntimeVersion(JavaVersion minimumRuntimeVersion) {
-            BuildParams.minimumRuntimeVersion = requireNonNull(minimumRuntimeVersion);
-        }
-
-        public void setGradleJavaVersion(JavaVersion gradleJavaVersion) {
-            BuildParams.gradleJavaVersion = requireNonNull(gradleJavaVersion);
-        }
-
-        public void setRuntimeJavaVersion(Provider<JavaVersion> runtimeJavaVersion) {
-            BuildParams.runtimeJavaVersion = requireNonNull(runtimeJavaVersion);
-        }
-
-        public void setRuntimeJavaDetails(Provider<String> runtimeJavaDetails) {
-            BuildParams.runtimeJavaDetails = runtimeJavaDetails;
-        }
-
-        public void
setInFipsJvm(boolean inFipsJvm) { - BuildParams.inFipsJvm = inFipsJvm; - } - - public void setGitRevision(String gitRevision) { - BuildParams.gitRevision = requireNonNull(gitRevision); - } - - public void setGitOrigin(String gitOrigin) { - BuildParams.gitOrigin = requireNonNull(gitOrigin); - } - - public void setBuildDate(ZonedDateTime buildDate) { - BuildParams.buildDate = requireNonNull(buildDate); - } - - public void setTestSeed(String testSeed) { - BuildParams.testSeed = requireNonNull(testSeed); - } - public void setIsCi(boolean isCi) { BuildParams.isCi = isCi; } - - public void setDefaultParallel(int defaultParallel) { - BuildParams.defaultParallel = defaultParallel; - } - - public void setIsSnapshotBuild(final boolean isSnapshotBuild) { - BuildParams.isSnapshotBuild = isSnapshotBuild; - } - - public void setBwcVersions(Provider bwcVersions) { - BuildParams.bwcVersions = requireNonNull(bwcVersions); - } - - public void setJavaToolChainSpec(Provider> javaToolChain) { - BuildParams.javaToolChainSpec = javaToolChain; - } } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index 2d5a28bdd6af9..761b0601a1c24 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -58,6 +58,8 @@ import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.conventions.GUtils.elvis; + public class GlobalBuildInfoPlugin implements Plugin { private static final Logger LOGGER = Logging.getLogger(GlobalBuildInfoPlugin.class); private static final String DEFAULT_VERSION_JAVA_FILE_PATH = "server/src/main/java/org/elasticsearch/Version.java"; @@ -67,6 +69,7 @@ public class GlobalBuildInfoPlugin implements Plugin { private final JvmMetadataDetector metadataDetector; private final ProviderFactory providers; private JavaToolchainService toolChainService; + private Project project; @Inject public GlobalBuildInfoPlugin( @@ -87,6 +90,7 @@ public void apply(Project project) { if (project != project.getRootProject()) { throw new IllegalStateException(this.getClass().getName() + " can only be applied to the root project."); } + this.project = project; project.getPlugins().apply(JvmToolchainsPlugin.class); toolChainService = project.getExtensions().getByType(JavaToolchainService.class); GradleVersion minimumGradleVersion = GradleVersion.version(getResourceContents("/minimumGradleVersion")); @@ -98,55 +102,61 @@ public void apply(Project project) { JavaVersion minimumRuntimeVersion = JavaVersion.toVersion(getResourceContents("/minimumRuntimeVersion")); Provider explicitRuntimeJavaHome = findRuntimeJavaHome(); - boolean isExplicitRuntimeJavaHomeSet = explicitRuntimeJavaHome.isPresent(); - Provider actualRuntimeJavaHome = isExplicitRuntimeJavaHomeSet + boolean isRuntimeJavaHomeExplicitlySet = explicitRuntimeJavaHome.isPresent(); + Provider actualRuntimeJavaHome = isRuntimeJavaHomeExplicitlySet ? 
explicitRuntimeJavaHome : resolveJavaHomeFromToolChainService(VersionProperties.getBundledJdkMajorVersion()); GitInfo gitInfo = GitInfo.gitInfo(project.getRootDir()); - BuildParams.init(params -> { - params.reset(); - params.setRuntimeJavaHome(actualRuntimeJavaHome); - params.setJavaToolChainSpec(resolveToolchainSpecFromEnv()); - Provider runtimeJdkMetaData = actualRuntimeJavaHome.map( - runtimeJavaHome -> metadataDetector.getMetadata(getJavaInstallation(runtimeJavaHome)) - ); - params.setRuntimeJavaVersion( + Provider runtimeJdkMetaData = actualRuntimeJavaHome.map( + runtimeJavaHome -> metadataDetector.getMetadata(getJavaInstallation(runtimeJavaHome)) + ); + AtomicReference cache = new AtomicReference<>(); + Provider bwcVersionsProvider = providers.provider( + () -> cache.updateAndGet(val -> val == null ? resolveBwcVersions() : val) + ); + BuildParameterExtension buildParams = project.getExtensions() + .create( + "buildParams", + BuildParameterExtension.class, + actualRuntimeJavaHome, + resolveToolchainSpecFromEnv(), actualRuntimeJavaHome.map( javaHome -> determineJavaVersion( "runtime java.home", javaHome, - isExplicitRuntimeJavaHomeSet + isRuntimeJavaHomeExplicitlySet ? minimumRuntimeVersion : JavaVersion.toVersion(VersionProperties.getBundledJdkMajorVersion()) ) - ) + ), + isRuntimeJavaHomeExplicitlySet, + runtimeJdkMetaData.map(m -> formatJavaVendorDetails(m)), + getAvailableJavaVersions(), + minimumCompilerVersion, + minimumRuntimeVersion, + Jvm.current().getJavaVersion(), + gitInfo.getRevision(), + gitInfo.getOrigin(), + ZonedDateTime.now(ZoneOffset.UTC), + getTestSeed(), + System.getenv("JENKINS_URL") != null || System.getenv("BUILDKITE_BUILD_URL") != null || System.getProperty("isCI") != null, + ParallelDetector.findDefaultParallel(project), + Util.getBooleanProperty("build.snapshot", true), + bwcVersionsProvider ); - params.setIsRuntimeJavaHomeSet(isExplicitRuntimeJavaHomeSet); - params.setRuntimeJavaDetails(runtimeJdkMetaData.map(m -> formatJavaVendorDetails(m))); - params.setJavaVersions(getAvailableJavaVersions()); - params.setMinimumCompilerVersion(minimumCompilerVersion); - params.setMinimumRuntimeVersion(minimumRuntimeVersion); - params.setGradleJavaVersion(Jvm.current().getJavaVersion()); - params.setGitRevision(gitInfo.getRevision()); - params.setGitOrigin(gitInfo.getOrigin()); - params.setBuildDate(ZonedDateTime.now(ZoneOffset.UTC)); - params.setTestSeed(getTestSeed()); + + project.getGradle().getSharedServices().registerIfAbsent("buildParams", BuildParameterService.class, spec -> { + // Provide some parameters + spec.getParameters().getBuildParams().set(buildParams); + }); + + BuildParams.init(params -> { + params.reset(); params.setIsCi( System.getenv("JENKINS_URL") != null || System.getenv("BUILDKITE_BUILD_URL") != null || System.getProperty("isCI") != null ); - params.setDefaultParallel(ParallelDetector.findDefaultParallel(project)); - params.setInFipsJvm(Util.getBooleanProperty("tests.fips.enabled", false)); - params.setIsSnapshotBuild(Util.getBooleanProperty("build.snapshot", true)); - AtomicReference cache = new AtomicReference<>(); - params.setBwcVersions( - providers.provider( - () -> cache.updateAndGet( - val -> val == null ? 
resolveBwcVersions(Util.locateElasticsearchWorkspace(project.getGradle())) : val - ) - ) - ); }); // Enforce the minimum compiler version @@ -155,7 +165,7 @@ public void apply(Project project) { // Print global build info header just before task execution // Only do this if we are the root build of a composite if (GradleUtils.isIncludedBuild(project) == false) { - project.getGradle().getTaskGraph().whenReady(graph -> logGlobalBuildInfo()); + project.getGradle().getTaskGraph().whenReady(graph -> logGlobalBuildInfo(buildParams)); } } @@ -180,9 +190,12 @@ private String formatJavaVendorDetails(JvmInstallationMetadata runtimeJdkMetaDat /* Introspect all versions of ES that may be tested against for backwards * compatibility. It is *super* important that this logic is the same as the * logic in VersionUtils.java. */ - private static BwcVersions resolveBwcVersions(File root) { - File versionsFile = new File(root, DEFAULT_VERSION_JAVA_FILE_PATH); - try (var is = new FileInputStream(versionsFile)) { + private BwcVersions resolveBwcVersions() { + String versionsFilePath = elvis( + System.getProperty("BWC_VERSION_SOURCE"), + new File(Util.locateElasticsearchWorkspace(project.getGradle()), DEFAULT_VERSION_JAVA_FILE_PATH).getPath() + ); + try (var is = new FileInputStream(versionsFilePath)) { List versionLines = IOUtils.readLines(is, "UTF-8"); return new BwcVersions(versionLines); } catch (IOException e) { @@ -190,7 +203,7 @@ private static BwcVersions resolveBwcVersions(File root) { } } - private void logGlobalBuildInfo() { + private void logGlobalBuildInfo(BuildParameterExtension buildParams) { final String osName = System.getProperty("os.name"); final String osVersion = System.getProperty("os.version"); final String osArch = System.getProperty("os.arch"); @@ -202,14 +215,14 @@ private void logGlobalBuildInfo() { LOGGER.quiet("Elasticsearch Build Hamster says Hello!"); LOGGER.quiet(" Gradle Version : " + GradleVersion.current().getVersion()); LOGGER.quiet(" OS Info : " + osName + " " + osVersion + " (" + osArch + ")"); - if (BuildParams.getIsRuntimeJavaHomeSet()) { - JvmInstallationMetadata runtimeJvm = metadataDetector.getMetadata(getJavaInstallation(BuildParams.getRuntimeJavaHome())); + if (buildParams.getIsRuntimeJavaHomeSet()) { + JvmInstallationMetadata runtimeJvm = metadataDetector.getMetadata(getJavaInstallation(buildParams.getRuntimeJavaHome().get())); final String runtimeJvmVendorDetails = runtimeJvm.getVendor().getDisplayName(); final String runtimeJvmImplementationVersion = runtimeJvm.getJvmVersion(); final String runtimeVersion = runtimeJvm.getRuntimeVersion(); final String runtimeExtraDetails = runtimeJvmVendorDetails + ", " + runtimeVersion; LOGGER.quiet(" Runtime JDK Version : " + runtimeJvmImplementationVersion + " (" + runtimeExtraDetails + ")"); - LOGGER.quiet(" Runtime java.home : " + BuildParams.getRuntimeJavaHome()); + LOGGER.quiet(" Runtime java.home : " + buildParams.getRuntimeJavaHome().get()); LOGGER.quiet(" Gradle JDK Version : " + gradleJvmImplementationVersion + " (" + gradleJvmVendorDetails + ")"); LOGGER.quiet(" Gradle java.home : " + gradleJvm.getJavaHome()); } else { @@ -220,8 +233,8 @@ private void logGlobalBuildInfo() { if (javaToolchainHome != null) { LOGGER.quiet(" JAVA_TOOLCHAIN_HOME : " + javaToolchainHome); } - LOGGER.quiet(" Random Testing Seed : " + BuildParams.getTestSeed()); - LOGGER.quiet(" In FIPS 140 mode : " + BuildParams.isInFipsJvm()); + LOGGER.quiet(" Random Testing Seed : " + buildParams.getTestSeed()); + LOGGER.quiet(" In FIPS 140 mode : " + 
buildParams.getInFipsJvm()); LOGGER.quiet("======================================="); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java index f71c86b19a140..f1ec236efe646 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java @@ -11,7 +11,7 @@ import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask; import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.plugins.JavaBasePlugin; @@ -30,7 +30,7 @@ public class ForbiddenApisPrecommitPlugin extends PrecommitPlugin { @Override public TaskProvider createTask(Project project) { project.getPluginManager().apply(JavaBasePlugin.class); - + var buildParams = project.getRootProject().getExtensions().getByType(BuildParameterExtension.class); // Create a convenience task for all checks (this does not conflict with extension, as it has higher priority in DSL): var forbiddenTask = project.getTasks() .register(FORBIDDEN_APIS_TASK_NAME, task -> { task.setDescription("Runs forbidden-apis checks."); }); @@ -57,7 +57,7 @@ public TaskProvider createTask(Project project) { t.setClassesDirs(sourceSet.getOutput().getClassesDirs()); t.dependsOn(resourcesTask); t.setClasspath(sourceSet.getRuntimeClasspath().plus(sourceSet.getCompileClasspath())); - t.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion().getMajorVersion()); + t.setTargetCompatibility(buildParams.getMinimumRuntimeVersion().getMajorVersion()); t.getBundledSignatures().set(BUNDLED_SIGNATURE_DEFAULTS); t.setSignaturesFiles( project.files( diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java index 80cece6074ab7..f70e25a57e331 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -12,17 +12,19 @@ import org.elasticsearch.gradle.dependencies.CompileOnlyResolvePlugin; import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask; import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.component.ModuleComponentIdentifier; +import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskProvider; import java.io.File; import java.nio.file.Path; import static org.elasticsearch.gradle.internal.util.DependenciesUtils.createFileCollectionFromNonTransitiveArtifactsView; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; public class 
ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin { @@ -31,10 +33,14 @@ public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin { @Override public TaskProvider createTask(Project project) { + project.getRootProject().getPlugins().apply(CompileOnlyResolvePlugin.class); + Property buildParams = loadBuildParams(project); + project.getPlugins().apply(CompileOnlyResolvePlugin.class); project.getConfigurations().create("forbiddenApisCliJar"); project.getDependencies().add("forbiddenApisCliJar", "de.thetaphi:forbiddenapis:3.6"); Configuration jdkJarHellConfig = project.getConfigurations().create(JDK_JAR_HELL_CONFIG_NAME); + if (project.getPath().equals(LIBS_ELASTICSEARCH_CORE_PROJECT_PATH) == false) { // Internal projects are not all plugins, so make sure the check is available // we are not doing this for this project itself to avoid jar hell with itself @@ -66,9 +72,12 @@ public TaskProvider createTask(Project project) { && ((ModuleComponentIdentifier) identifier).getGroup().startsWith("org.elasticsearch") == false ) ); + if (buildParams.get().getIsRuntimeJavaHomeSet()) { + t.getRuntimeJavaVersion().set(buildParams.get().getRuntimeJavaVersion()); + } t.dependsOn(resourcesTask); - t.getTargetCompatibility().set(project.provider(BuildParams::getRuntimeJavaVersion)); - t.getJavaHome().set(project.provider(BuildParams::getRuntimeJavaHome).map(File::getPath)); + t.getTargetCompatibility().set(buildParams.flatMap(params -> params.getRuntimeJavaVersion())); + t.getJavaHome().set(buildParams.flatMap(params -> params.getRuntimeJavaHome()).map(File::getPath)); t.setSignatureFile(resourcesDir.resolve("forbidden/third-party-audit.txt").toFile()); t.getJdkJarHellClasspath().from(jdkJarHellConfig); t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnly)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java index 7afee8acdd4d2..442797775de2f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java @@ -13,7 +13,6 @@ import org.apache.commons.io.output.NullOutputStream; import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.VersionProperties; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.DefaultTask; import org.gradle.api.JavaVersion; import org.gradle.api.file.ArchiveOperations; @@ -194,6 +193,10 @@ public Set getMissingClassExcludes() { @SkipWhenEmpty public abstract ConfigurableFileCollection getJarsToScan(); + @Input + @Optional + public abstract Property getRuntimeJavaVersion(); + @Classpath public FileCollection getClasspath() { return classpath; @@ -371,14 +374,10 @@ private String runForbiddenAPIsCli() throws IOException { /** Returns true iff the build Java version is the same as the given version. 
     */
     private boolean isJavaVersion(JavaVersion version) {
-        if (BuildParams.getIsRuntimeJavaHomeSet()) {
-            if (version.equals(BuildParams.getRuntimeJavaVersion())) {
-                return true;
-            }
-        } else if (version.getMajorVersion().equals(VersionProperties.getBundledJdkMajorVersion())) {
-            return true;
+        if (getRuntimeJavaVersion().isPresent()) {
+            return getRuntimeJavaVersion().get().equals(version);
         }
-        return false;
+        return version.getMajorVersion().equals(VersionProperties.getBundledJdkMajorVersion());
     }
 
     private Set<String> runJdkJarHellCheck() throws IOException {
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/GenerateSnykDependencyGraph.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/GenerateSnykDependencyGraph.java
index 31c6b503d7328..b19c1207d56fb 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/GenerateSnykDependencyGraph.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/GenerateSnykDependencyGraph.java
@@ -11,7 +11,6 @@
 
 import groovy.json.JsonOutput;
 
-import org.elasticsearch.gradle.internal.info.BuildParams;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
 import org.gradle.api.artifacts.Configuration;
@@ -118,7 +117,7 @@ private Map<String, Map<String, Object>> projectAttributesData() {
     }
 
     private Object buildTargetData() {
-        return Map.of("remoteUrl", remoteUrl.get(), "branch", BuildParams.getGitRevision());
+        return Map.of("remoteUrl", remoteUrl.get(), "branch", getGitRevision().get());
     }
 
     @InputFiles
@@ -160,4 +159,10 @@ public Property<String> getRemoteUrl() {
     public Property<String> getTargetReference() {
         return targetReference;
     }
+
+    private final Property<String> gitRevision = getProject().getObjects().property(String.class);
+
+    @Input
+    public Property<String> getGitRevision() {
+        return gitRevision; // dedicated backing property; returning targetReference here was a copy-paste slip
+    }
 }
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePlugin.java
index b3e3d7f7c004e..fa10daf8dfaaa 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePlugin.java
@@ -10,17 +10,22 @@
 package org.elasticsearch.gradle.internal.snyk;
 
 import org.elasticsearch.gradle.internal.conventions.info.GitInfo;
+import org.elasticsearch.gradle.internal.info.BuildParameterExtension;
+import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin;
 import org.gradle.api.Plugin;
 import org.gradle.api.Project;
 import org.gradle.api.artifacts.Configuration;
 import org.gradle.api.file.ProjectLayout;
 import org.gradle.api.plugins.JavaPlugin;
 import org.gradle.api.plugins.JavaPluginExtension;
+import org.gradle.api.provider.Property;
 import org.gradle.api.provider.ProviderFactory;
 import org.gradle.api.tasks.SourceSet;
 
 import javax.inject.Inject;
 
+import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams;
+
 public class SnykDependencyMonitoringGradlePlugin implements Plugin<Project> {
 
     public static final String UPLOAD_TASK_NAME = "uploadSnykDependencyGraph";
@@ -35,10 +40,14 @@ public SnykDependencyMonitoringGradlePlugin(ProjectLayout projectLayout, Provide
 
     @Override
     public void apply(Project project) {
+        project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class);
+        Property<BuildParameterExtension> buildParams = loadBuildParams(project);
+
         var generateTaskProvider = project.getTasks()
.register("generateSnykDependencyGraph", GenerateSnykDependencyGraph.class, generateSnykDependencyGraph -> { generateSnykDependencyGraph.getProjectPath().set(project.getPath()); generateSnykDependencyGraph.getProjectName().set(project.getName()); + generateSnykDependencyGraph.getGitRevision().set(buildParams.get().getGitRevision()); String projectVersion = project.getVersion().toString(); generateSnykDependencyGraph.getVersion().set(projectVersion); generateSnykDependencyGraph.getGradleVersion().set(project.getGradle().getGradleVersion()); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index 8e7884888b63b..e8d2bbd93ff20 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -16,11 +16,11 @@ import org.elasticsearch.gradle.ElasticsearchDistributionType; import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; +import org.elasticsearch.gradle.internal.BwcVersions; import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin; import org.elasticsearch.gradle.internal.JdkDownloadPlugin; import org.elasticsearch.gradle.internal.docker.DockerSupportPlugin; import org.elasticsearch.gradle.internal.docker.DockerSupportService; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Action; @@ -54,6 +54,7 @@ import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_UBI; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_WOLFI; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.RPM; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; /** * This class defines gradle tasks for testing our various distribution artifacts. 
@@ -72,6 +73,7 @@ public void apply(Project project) { project.getPlugins().apply(InternalDistributionDownloadPlugin.class); project.getPlugins().apply(JdkDownloadPlugin.class); project.getPluginManager().apply("elasticsearch.java"); + var buildParams = loadBuildParams(project).get(); Provider dockerSupport = GradleUtils.getBuildService( project.getGradle().getSharedServices(), @@ -84,7 +86,7 @@ public void apply(Project project) { List testDistributions = configureDistributions(project); Map> lifecycleTasks = lifecycleTasks(project, "destructiveDistroTest"); - Map> versionTasks = versionTasks(project, "destructiveDistroUpgradeTest"); + Map> versionTasks = versionTasks(project, "destructiveDistroUpgradeTest", buildParams.getBwcVersions()); TaskProvider destructiveDistroTest = project.getTasks().register("destructiveDistroTest"); Configuration examplePlugin = configureExamplePlugin(project); @@ -115,7 +117,7 @@ public void apply(Project project) { lifecycleTask.configure(t -> t.dependsOn(destructiveTask)); if ((type == DEB || type == RPM) && distribution.getBundledJdk()) { - for (Version version : BuildParams.getBwcVersions().getIndexCompatible()) { + for (Version version : buildParams.getBwcVersions().getIndexCompatible()) { final ElasticsearchDistribution bwcDistro; if (version.equals(Version.fromString(distribution.getVersion()))) { // this is the same as the distribution we are testing @@ -156,10 +158,10 @@ private static Map> lifecycleTask return lifecyleTasks; } - private static Map> versionTasks(Project project, String taskPrefix) { + private static Map> versionTasks(Project project, String taskPrefix, BwcVersions bwcVersions) { Map> versionTasks = new HashMap<>(); - for (Version version : BuildParams.getBwcVersions().getIndexCompatible()) { + for (Version version : bwcVersions.getIndexCompatible()) { versionTasks.put(version.toString(), project.getTasks().register(taskPrefix + ".v" + version)); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/InternalClusterTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/InternalClusterTestPlugin.java index 3619c9c1ec76d..e13c2544ae9cf 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/InternalClusterTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/InternalClusterTestPlugin.java @@ -9,7 +9,7 @@ package org.elasticsearch.gradle.internal.test; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.JavaVersion; import org.gradle.api.Plugin; @@ -18,16 +18,21 @@ import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.testing.Test; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + public class InternalClusterTestPlugin implements Plugin { public static final String SOURCE_SET_NAME = "internalClusterTest"; @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); + TaskProvider internalClusterTest = GradleUtils.addTestSourceSet(project, SOURCE_SET_NAME); internalClusterTest.configure(task -> { // Set GC options to mirror defaults in jvm.options - if (BuildParams.getRuntimeJavaVersion().compareTo(JavaVersion.VERSION_14) < 0) { + if 
(buildParams.getRuntimeJavaVersion().get().compareTo(JavaVersion.VERSION_14) < 0) { task.jvmArgs("-XX:+UseConcMarkSweepGC", "-XX:CMSInitiatingOccupancyFraction=75", "-XX:+UseCMSInitiatingOccupancyOnly"); } else { task.jvmArgs("-XX:+UseG1GC"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestPlugin.java index fddddbd14d3ab..c13a5f0e4d30d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestPlugin.java @@ -9,7 +9,7 @@ package org.elasticsearch.gradle.internal.test; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.file.RegularFile; @@ -19,6 +19,8 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + public class MutedTestPlugin implements Plugin { private static final String ADDITIONAL_FILES_PROPERTY = "org.elasticsearch.additional.muted.tests"; @@ -32,6 +34,9 @@ public void apply(Project project) { .map(p -> project.getRootProject().getLayout().getProjectDirectory().file(p)) .toList(); + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); + Provider mutedTestsProvider = project.getGradle() .getSharedServices() .registerIfAbsent("mutedTests", MutedTestsBuildService.class, spec -> { @@ -46,7 +51,7 @@ public void apply(Project project) { } // Don't fail when all tests are ignored when running in CI - filter.setFailOnNoMatchingTests(BuildParams.isCi() == false); + filter.setFailOnNoMatchingTests(buildParams.isCi() == false); }); }); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java index 564465fbb2554..68711881b02f4 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java @@ -11,7 +11,7 @@ import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask; import org.elasticsearch.gradle.internal.conventions.util.Util; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.precommit.FilePermissionsPrecommitPlugin; import org.elasticsearch.gradle.internal.precommit.ForbiddenPatternsPrecommitPlugin; import org.elasticsearch.gradle.internal.precommit.ForbiddenPatternsTask; @@ -35,6 +35,7 @@ public class TestWithSslPlugin implements Plugin { @Override public void apply(Project project) { File keyStoreDir = new File(project.getBuildDir(), "keystore"); + BuildParameterExtension buildParams = project.getRootProject().getExtensions().getByType(BuildParameterExtension.class); TaskProvider exportKeyStore = project.getTasks() .register("copyTestCertificates", ExportElasticsearchBuildResourcesTask.class, (t) -> { t.copy("test/ssl/test-client.crt"); @@ -87,7 +88,7 @@ public void apply(Project project) { .getExtensions() .getByName(TestClustersPlugin.EXTENSION_NAME); 
clusters.configureEach(c -> { - if (BuildParams.isInFipsJvm()) { + if (buildParams.getInFipsJvm()) { c.setting("xpack.security.transport.ssl.key", "test-node.key"); c.keystore("xpack.security.transport.ssl.secure_key_passphrase", "test-node-key-password"); c.setting("xpack.security.transport.ssl.certificate", "test-node.crt"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 777a6d931e50e..548791b9496c2 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -20,7 +20,6 @@ import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.test.ErrorReportingTestListener; import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.elasticsearch.gradle.plugin.BasePluginBuildPlugin; @@ -58,6 +57,8 @@ import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + /** * Base plugin used for wiring up build tasks to REST testing tasks using new JUnit rule-based test clusters framework. */ @@ -92,6 +93,7 @@ public RestTestBasePlugin(ProviderFactory providerFactory) { public void apply(Project project) { project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); project.getPluginManager().apply(InternalDistributionDownloadPlugin.class); + var bwcVersions = loadBuildParams(project).get().getBwcVersions(); // Register integ-test and default distributions ElasticsearchDistribution defaultDistro = createDistribution( @@ -176,7 +178,7 @@ public void apply(Project project) { task.systemProperty("tests.system_call_filter", "false"); // Pass minimum wire compatible version which is used by upgrade tests - task.systemProperty(MINIMUM_WIRE_COMPATIBLE_VERSION_SYSPROP, BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + task.systemProperty(MINIMUM_WIRE_COMPATIBLE_VERSION_SYSPROP, bwcVersions.getMinimumWireCompatibleVersion()); // Register plugins and modules as task inputs and pass paths as system properties to tests var modulePath = project.getObjects().fileCollection().from(modulesConfiguration); @@ -223,7 +225,7 @@ public Void call(Object... args) { } Version version = (Version) args[0]; - boolean isReleased = BuildParams.getBwcVersions().unreleasedInfo(version) == null; + boolean isReleased = bwcVersions.unreleasedInfo(version) == null; String versionString = version.toString(); ElasticsearchDistribution bwcDistro = createDistribution(project, "bwc_" + versionString, versionString); @@ -235,9 +237,9 @@ public Void call(Object... 
args) { providerFactory.provider(() -> bwcDistro.getExtracted().getSingleFile().getPath()) ); - if (version.getMajor() > 0 && version.before(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion())) { + if (version.getMajor() > 0 && version.before(bwcVersions.getMinimumWireCompatibleVersion())) { // If we are upgrade testing older versions we also need to upgrade to 7.last - this.call(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + this.call(bwcVersions.getMinimumWireCompatibleVersion()); } return null; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java index fe305b8b46cf7..61dea47eb15c1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java @@ -11,7 +11,8 @@ import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask; import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask; import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin; @@ -47,6 +48,7 @@ import javax.inject.Inject; import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupYamlRestTestDependenciesDefaults; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; /** * Apply this plugin to run the YAML based REST tests from a prior major version against this version's cluster. 
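RestTestBasePlugin above resolves BwcVersions once in apply() and bakes derived values into task configuration, as with the minimum wire-compatible version system property. A sketch of the same move in isolation, assuming the names in this diff (the property key below is illustrative; the real one lives behind the MINIMUM_WIRE_COMPATIBLE_VERSION_SYSPROP constant):

    // Inside a plugin's apply(Project project):
    BwcVersions bwcVersions = loadBuildParams(project).get().getBwcVersions();
    project.getTasks().withType(Test.class).configureEach(task ->
        // Upgrade tests read this to pick the oldest wire-compatible version to start from.
        task.systemProperty("tests.minimum.wire.compatible", bwcVersions.getMinimumWireCompatibleVersion().toString())
    );
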
@@ -74,6 +76,8 @@ public AbstractYamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperati @Override public void apply(Project project) { + project.getRootProject().getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + BuildParameterExtension buildParams = loadBuildParams(project).get(); final Path compatRestResourcesDir = Path.of("restResources").resolve("compat"); final Path compatSpecsDir = compatRestResourcesDir.resolve("yamlSpecs"); @@ -91,14 +95,14 @@ public void apply(Project project) { GradleUtils.extendSourceSet(project, YamlRestTestPlugin.YAML_REST_TEST, SOURCE_SET_NAME); // determine the previous rest compatibility version and BWC project path - int currentMajor = BuildParams.getBwcVersions().getCurrentVersion().getMajor(); - Version lastMinor = BuildParams.getBwcVersions() + int currentMajor = buildParams.getBwcVersions().getCurrentVersion().getMajor(); + Version lastMinor = buildParams.getBwcVersions() .getUnreleased() .stream() .filter(v -> v.getMajor() == currentMajor - 1) .min(Comparator.reverseOrder()) .get(); - String lastMinorProjectPath = BuildParams.getBwcVersions().unreleasedInfo(lastMinor).gradleProjectPath(); + String lastMinorProjectPath = buildParams.getBwcVersions().unreleasedInfo(lastMinor).gradleProjectPath(); // copy compatible rest specs Configuration bwcMinorConfig = project.getConfigurations().create(BWC_MINOR_CONFIG_NAME); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java index 53fb4c61e151c..a934164d11af6 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java @@ -12,7 +12,7 @@ import org.apache.commons.lang.StringUtils; import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.internal.docker.DockerBuildTask; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -20,6 +20,8 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + public class TestFixturesDeployPlugin implements Plugin { public static final String DEPLOY_FIXTURE_TASK_NAME = "deployFixtureDockerImages"; @@ -27,13 +29,19 @@ public class TestFixturesDeployPlugin implements Plugin { @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); NamedDomainObjectContainer fixtures = project.container(TestFixtureDeployment.class); project.getExtensions().add("dockerFixtures", fixtures); - registerDeployTaskPerFixture(project, fixtures); + registerDeployTaskPerFixture(project, fixtures, buildParams.isCi()); project.getTasks().register(DEPLOY_FIXTURE_TASK_NAME, task -> task.dependsOn(project.getTasks().withType(DockerBuildTask.class))); } - private static void registerDeployTaskPerFixture(Project project, NamedDomainObjectContainer fixtures) { + private static void registerDeployTaskPerFixture( + Project project, + NamedDomainObjectContainer fixtures, + boolean isCi + ) { fixtures.all( fixture -> 
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java
index 53fb4c61e151c..a934164d11af6 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java
@@ -12,7 +12,7 @@
 import org.apache.commons.lang.StringUtils;
 import org.elasticsearch.gradle.Architecture;
 import org.elasticsearch.gradle.internal.docker.DockerBuildTask;
-import org.elasticsearch.gradle.internal.info.BuildParams;
+import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin;
 import org.gradle.api.NamedDomainObjectContainer;
 import org.gradle.api.Plugin;
 import org.gradle.api.Project;
@@ -20,6 +20,8 @@
 import java.util.Arrays;
 import java.util.List;
 
+import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams;
+
 public class TestFixturesDeployPlugin implements Plugin<Project> {
 
     public static final String DEPLOY_FIXTURE_TASK_NAME = "deployFixtureDockerImages";
@@ -27,13 +29,19 @@ public class TestFixturesDeployPlugin implements Plugin<Project> {
 
     @Override
     public void apply(Project project) {
+        project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class);
+        var buildParams = loadBuildParams(project).get();
         NamedDomainObjectContainer<TestFixtureDeployment> fixtures = project.container(TestFixtureDeployment.class);
         project.getExtensions().add("dockerFixtures", fixtures);
-        registerDeployTaskPerFixture(project, fixtures);
+        registerDeployTaskPerFixture(project, fixtures, buildParams.isCi());
         project.getTasks().register(DEPLOY_FIXTURE_TASK_NAME, task -> task.dependsOn(project.getTasks().withType(DockerBuildTask.class)));
     }
 
-    private static void registerDeployTaskPerFixture(Project project, NamedDomainObjectContainer<TestFixtureDeployment> fixtures) {
+    private static void registerDeployTaskPerFixture(
+        Project project,
+        NamedDomainObjectContainer<TestFixtureDeployment> fixtures,
+        boolean isCi
+    ) {
         fixtures.all(
             fixture -> project.getTasks()
                 .register("deploy" + StringUtils.capitalize(fixture.getName()) + "DockerImage", DockerBuildTask.class, task -> {
@@ -42,12 +50,12 @@ private static void registerDeployTaskPerFixture(Project project, NamedDomainObj
                     if (baseImages.isEmpty() == false) {
                         task.setBaseImages(baseImages.toArray(new String[baseImages.size()]));
                     }
-                    task.setNoCache(BuildParams.isCi());
+                    task.setNoCache(isCi);
                     task.setTags(
                         new String[] { resolveTargetDockerRegistry(fixture) + "/" + fixture.getName() + "-fixture:" + fixture.getVersion().get() }
                     );
-                    task.getPush().set(BuildParams.isCi());
+                    task.getPush().set(isCi);
                     task.getPlatforms().addAll(Arrays.stream(Architecture.values()).map(a -> a.dockerPlatform).toList());
                     task.setGroup("Deploy TestFixtures");
                     task.setDescription("Deploys the " + fixture.getName() + " test fixture");
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java
index 504b081fd505d..ab28a66d93065 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java
@@ -17,7 +17,7 @@
 
 import org.elasticsearch.gradle.internal.docker.DockerSupportPlugin;
 import org.elasticsearch.gradle.internal.docker.DockerSupportService;
-import org.elasticsearch.gradle.internal.info.BuildParams;
+import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin;
 import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider;
 import org.elasticsearch.gradle.util.GradleUtils;
 import org.gradle.api.Action;
@@ -47,6 +47,8 @@
 
 import javax.inject.Inject;
 
+import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams;
+
 public class TestFixturesPlugin implements Plugin<Project> {
 
     private static final Logger LOGGER = Logging.getLogger(TestFixturesPlugin.class);
@@ -68,6 +70,8 @@ protected FileSystemOperations getFileSystemOperations() {
     @Override
     public void apply(Project project) {
         project.getRootProject().getPluginManager().apply(DockerSupportPlugin.class);
+        project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class);
+        var buildParams = loadBuildParams(project).get();
 
         TaskContainer tasks = project.getTasks();
         Provider<DockerComposeThrottle> dockerComposeThrottle = project.getGradle()
@@ -127,7 +131,7 @@ public void apply(Project project) {
 
         tasks.withType(ComposeUp.class).named("composeUp").configure(t -> {
             // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions
-            if (BuildParams.isCi()) {
+            if (buildParams.isCi()) {
                 t.usesService(dockerComposeThrottle);
                 t.usesService(dockerSupport);
             }
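The composeUp wiring above relies on a shared build service to serialize docker-compose startups in CI. A minimal sketch of how such a throttle is typically registered and consumed; the registration shown here is an assumption (only the usesService call appears in this diff), and DockerComposeThrottle is the service type referenced above:

    // Assumed registration side: maxParallelUsages bounds concurrent consumers.
    Provider<DockerComposeThrottle> throttle = project.getGradle()
        .getSharedServices()
        .registerIfAbsent("dockerComposeThrottle", DockerComposeThrottle.class,
            spec -> spec.getMaxParallelUsages().set(1));

    // Consumer side, as in the hunk above: Gradle will not run more than
    // maxParallelUsages tasks that declare usesService(throttle) at once.
    tasks.withType(ComposeUp.class).named("composeUp")
        .configure(t -> t.usesService(throttle));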
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/CiUtils.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/CiUtils.java
new file mode 100644
index 0000000000000..1b019a6cbd3e6
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/CiUtils.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.gradle.internal.util;
+
+public class CiUtils {
+
+    static String safeName(String input) {
+        return input.replaceAll("[^a-zA-Z0-9_\\-\\.]+", " ").trim().replaceAll(" ", "_").toLowerCase();
+    }
+
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/ParamsUtils.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/ParamsUtils.java
new file mode 100644
index 0000000000000..0afe654bc5fbc
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/ParamsUtils.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.gradle.internal.util;
+
+import org.elasticsearch.gradle.internal.info.BuildParameterExtension;
+import org.elasticsearch.gradle.internal.info.BuildParameterService;
+import org.gradle.api.Project;
+import org.gradle.api.provider.Property;
+import org.gradle.api.services.BuildServiceRegistration;
+
+public class ParamsUtils {
+
+    public static Property<BuildParameterExtension> loadBuildParams(Project project) {
+        BuildServiceRegistration<BuildParameterService, BuildParameterService.Params> buildParamsRegistrations = (BuildServiceRegistration<
+            BuildParameterService,
+            BuildParameterService.Params>) project.getGradle().getSharedServices().getRegistrations().getByName("buildParams");
+        Property<BuildParameterExtension> buildParams = buildParamsRegistrations.getParameters().getBuildParams();
+        return buildParams;
+    }
+
+}
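loadBuildParams above only looks up an already-registered service by name. The counterpart it assumes is not part of this diff; a hypothetical sketch of that registration side (in this PR it would live in GlobalBuildInfoPlugin, and buildParameterExtension stands in for the configured extension instance):

    // Hypothetical registration sketch: expose the BuildParameterExtension
    // through the parameters of a shared build service named "buildParams".
    project.getGradle()
        .getSharedServices()
        .registerIfAbsent("buildParams", BuildParameterService.class,
            spec -> spec.getParameters().getBuildParams().set(buildParameterExtension));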
diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt
index 5388f942be8d7..a9da7995c2b36 100644
--- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt
+++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt
@@ -119,7 +119,7 @@ java.time.zone.ZoneRules#getStandardOffset(java.time.Instant)
 java.time.zone.ZoneRules#getDaylightSavings(java.time.Instant)
 java.time.zone.ZoneRules#isDaylightSavings(java.time.Instant)
 
-@defaultMessage Use logger methods with non-Object parameter
+@defaultMessage The first parameter to a log4j log statement should be a String, a log4j Supplier (not java.util.function.Supplier), or another object that log4j supports.
 org.apache.logging.log4j.Logger#trace(java.lang.Object)
 org.apache.logging.log4j.Logger#trace(java.lang.Object, java.lang.Throwable)
 org.apache.logging.log4j.Logger#debug(java.lang.Object)
diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties
index c3511dd5d256c..29c5bc16a8c4a 100644
--- a/build-tools-internal/version.properties
+++ b/build-tools-internal/version.properties
@@ -14,7 +14,7 @@
 log4j = 2.19.0
 slf4j = 2.0.6
 ecsLogging = 1.2.0
 jna = 5.12.1
-netty = 4.1.109.Final
+netty = 4.1.115.Final
 commons_lang3 = 3.9
 google_oauth_client = 1.34.1
diff --git a/build-tools/build.gradle b/build-tools/build.gradle
index 7fd01f0c3d4f7..e457999fedfee 100644
--- a/build-tools/build.gradle
+++ b/build-tools/build.gradle
@@ -9,9 +9,6 @@
 buildscript {
   repositories {
-    maven {
-      url 'https://jitpack.io'
-    }
     mavenCentral()
   }
 }
@@ -117,9 +114,6 @@ configurations {
 }
 
 repositories {
-  maven {
-    url 'https://jitpack.io'
-  }
  mavenCentral()
  gradlePluginPortal()
 }
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/BasePluginBuildPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/BasePluginBuildPlugin.java
index e3adfe8d28148..b3a792b418384 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/BasePluginBuildPlugin.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/BasePluginBuildPlugin.java
@@ -167,7 +167,7 @@ private static CopySpec createBundleSpec(
             copySpec.exclude("plugin-security.codebases");
         });
         bundleSpec.from(
-            (Callable<TaskProvider<Task>>) () -> project.getPluginManager().hasPlugin("com.github.johnrengelman.shadow")
+            (Callable<TaskProvider<Task>>) () -> project.getPluginManager().hasPlugin("com.gradleup.shadow")
                 ? project.getTasks().named("shadowJar")
                 : project.getTasks().named("jar")
         );
diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
index d3d06b2de3575..f3f8e4703eba2 100644
--- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
+++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
@@ -168,7 +168,6 @@ abstract class AbstractGradleFuncTest extends Specification {
            ${extraPlugins.collect { p -> "id '$p'" }.join('\n')}
         }
         import org.elasticsearch.gradle.Architecture
-        import org.elasticsearch.gradle.internal.info.BuildParams
         import org.elasticsearch.gradle.internal.BwcVersions
         import org.elasticsearch.gradle.Version
 
@@ -182,7 +181,7 @@ abstract class AbstractGradleFuncTest extends Specification {
         ]
 
         BwcVersions versions = new BwcVersions(currentVersion, versionList)
-        BuildParams.init { it.setBwcVersions(provider(() -> versions)) }
+        buildParams.getBwcVersionsProperty().set(versions)
         """
     }
diff --git a/build.gradle b/build.gradle
index a91347ca6e19b..715614c1beea4 100644
--- a/build.gradle
+++ b/build.gradle
@@ -17,7 +17,6 @@
 import org.elasticsearch.gradle.DistributionDownloadPlugin
 import org.elasticsearch.gradle.Version
 import org.elasticsearch.gradle.internal.BaseInternalPluginBuildPlugin
 import org.elasticsearch.gradle.internal.ResolveAllDependencies
-import org.elasticsearch.gradle.internal.info.BuildParams
 import org.elasticsearch.gradle.util.GradleUtils
 import org.gradle.plugins.ide.eclipse.model.AccessRule
@@ -28,10 +27,6 @@ import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure
 
 buildscript {
   repositories {
-    maven {
-      url 'https://jitpack.io'
-    }
-    mavenCentral()
   }
 }
@@ -143,23 +138,23 @@ tasks.register("updateCIBwcVersions") {
   }
 
   doLast {
-    writeVersions(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible))
-    writeVersions(file(".ci/snapshotBwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible))
+    writeVersions(file(".ci/bwcVersions"), filterIntermediatePatches(buildParams.bwcVersions.indexCompatible))
+    writeVersions(file(".ci/snapshotBwcVersions"), filterIntermediatePatches(buildParams.bwcVersions.unreleasedIndexCompatible))
     expandBwcList(
       ".buildkite/pipelines/intake.yml",
       ".buildkite/pipelines/intake.template.yml",
-      filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible)
+      filterIntermediatePatches(buildParams.bwcVersions.unreleasedIndexCompatible)
     )
     writeBuildkitePipeline(
       ".buildkite/pipelines/periodic.yml",
       ".buildkite/pipelines/periodic.template.yml",
       [
-        new ListExpansion(versions: filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible), variable: "BWC_LIST"),
+        new ListExpansion(versions: filterIntermediatePatches(buildParams.bwcVersions.unreleasedIndexCompatible), variable: "BWC_LIST"),
       ],
       [
         new StepExpansion(
           templatePath: ".buildkite/pipelines/periodic.bwc.template.yml",
-          versions: filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible),
+          versions: filterIntermediatePatches(buildParams.bwcVersions.indexCompatible),
           variable: "BWC_STEPS"
         ),
       ]
@@ -169,7 +164,7 @@
       ".buildkite/pipelines/periodic-packaging.yml",
       ".buildkite/pipelines/periodic-packaging.template.yml",
       ".buildkite/pipelines/periodic-packaging.bwc.template.yml",
-      filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible)
+      filterIntermediatePatches(buildParams.bwcVersions.indexCompatible)
     )
   }
 }
@@ -191,19 +186,19 @@ tasks.register("verifyVersions") {
     // Fetch the metadata and parse the xml into Version instances because it's more straight forward here
     // rather than bwcVersion ( VersionCollection ).
     new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
-      BuildParams.bwcVersions.compareToAuthoritative(
+      buildParams.bwcVersions.compareToAuthoritative(
         new XmlParser().parse(s)
           .versioning.versions.version
          .collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ }
          .collect { Version.fromString(it) }
       )
     }
-    verifyCiYaml(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible))
-    verifyCiYaml(file(".ci/snapshotBwcVersions"), BuildParams.bwcVersions.unreleasedIndexCompatible)
+    verifyCiYaml(file(".ci/bwcVersions"), filterIntermediatePatches(buildParams.bwcVersions.indexCompatible))
+    verifyCiYaml(file(".ci/snapshotBwcVersions"), buildParams.bwcVersions.unreleasedIndexCompatible)
 
     // Make sure backport bot config file is up to date
     JsonNode backportConfig = new ObjectMapper().readTree(file(".backportrc.json"))
-    BuildParams.bwcVersions.forPreviousUnreleased { unreleasedVersion ->
+    buildParams.bwcVersions.forPreviousUnreleased { unreleasedVersion ->
       boolean valid = backportConfig.get("targetBranchChoices").elements().any { branchChoice ->
         if (branchChoice.isObject()) {
           return branchChoice.get("name").textValue() == unreleasedVersion.branch
@@ -420,8 +415,11 @@ gradle.projectsEvaluated {
   }
 }
 
-tasks.named("validateChangelogs") {
-  onlyIf { project.gradle.startParameter.taskNames.any { it.startsWith("checkPart") || it == 'functionalTests' } == false }
+tasks.named("validateChangelogs").configure {
+  def triggeredTaskNames = gradle.startParameter.taskNames
+  onlyIf {
+    triggeredTaskNames.any { it.startsWith("checkPart") || it == 'functionalTests' } == false
+  }
 }
 
 tasks.named("precommit") {
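The validateChangelogs change above is the standard configuration-cache idiom: read gradle.startParameter once at configuration time into a local, and let the onlyIf predicate capture only that value, so nothing reaches back into the Gradle model at execution time. The same pattern in Java, as a sketch:

    // Capture the invoked task names once, at configuration time.
    List<String> triggeredTaskNames = project.getGradle().getStartParameter().getTaskNames();
    project.getTasks().named("validateChangelogs").configure(task ->
        // The predicate closes over the captured list, not over `project`.
        task.onlyIf(t -> triggeredTaskNames.stream()
            .noneMatch(name -> name.startsWith("checkPart") || name.equals("functionalTests"))));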
diff --git a/distribution/build.gradle b/distribution/build.gradle
index 5b865b36f9e4d..e3481706ef230 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -14,7 +14,6 @@
 import org.elasticsearch.gradle.VersionProperties
 import org.elasticsearch.gradle.internal.ConcatFilesTask
 import org.elasticsearch.gradle.internal.DependenciesInfoPlugin
 import org.elasticsearch.gradle.internal.NoticeTask
-import org.elasticsearch.gradle.internal.info.BuildParams
 import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin
 
 import java.nio.file.Files
@@ -208,7 +207,7 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each {
     distro.copyModule(processDefaultOutputsTaskProvider, module)
     dependencies.add('featuresMetadata', module)
 
-    if (module.name.startsWith('transport-') || (BuildParams.snapshotBuild == false && module.name == 'apm')) {
+    if (module.name.startsWith('transport-') || (buildParams.snapshotBuild == false && module.name == 'apm')) {
       distro.copyModule(processIntegTestOutputsTaskProvider, module)
     }
 
@@ -378,7 +377,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
           exclude "**/platform/${excludePlatform}/**"
         }
       }
-      if (BuildParams.isSnapshotBuild()) {
+      if (buildParams.isSnapshotBuild()) {
         from(buildExternalTestModulesTaskProvider)
       }
       if (project.path.startsWith(':distribution:packages')) {
diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle
index 788e836f8f045..d73f9c395f15c 100644
--- a/distribution/docker/build.gradle
+++ b/distribution/docker/build.gradle
@@ -8,7 +8,6 @@
 import org.elasticsearch.gradle.internal.docker.DockerSupportService
 import org.elasticsearch.gradle.internal.docker.ShellRetry
 import org.elasticsearch.gradle.internal.docker.TransformLog4jConfigFilter
 import org.elasticsearch.gradle.internal.docker.*
-import org.elasticsearch.gradle.internal.info.BuildParams
 import org.elasticsearch.gradle.util.GradleUtils
 import org.elasticsearch.gradle.Architecture
 
 import java.nio.file.Path
@@ -120,7 +119,7 @@ ext.expansions = { Architecture architecture, DockerBase base ->
   // the image. When developing the Docker images, it's very tedious to completely rebuild
   // an image for every single change. Therefore, outside of CI, we fix the
   // build time to midnight so that the Docker build cache is usable.
-  def buildDate = BuildParams.isCi() ? BuildParams.buildDate : BuildParams.buildDate.truncatedTo(ChronoUnit.DAYS).toString()
+  def buildDate = buildParams.isCi() ? buildParams.buildDate : buildParams.buildDate.truncatedTo(ChronoUnit.DAYS).toString()
 
   return [
     'arch'               : architecture.classifier,
@@ -128,7 +127,7 @@
     'bin_dir'            : base == DockerBase.IRON_BANK ? 'scripts' : 'bin',
     'build_date'         : buildDate,
     'config_dir'         : base == DockerBase.IRON_BANK ? 'scripts' : 'config',
-    'git_revision'       : BuildParams.gitRevision,
+    'git_revision'       : buildParams.gitRevision,
     'license'            : base == DockerBase.IRON_BANK ? 'Elastic License 2.0' : 'Elastic-License-2.0',
     'package_manager'    : base.packageManager,
     'docker_base'        : base.name().toLowerCase(),
@@ -390,7 +389,7 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) {
     dockerContext.fileProvider(transformTask.map { Sync task -> task.getDestinationDir() })
 
-    noCache = BuildParams.isCi()
+    noCache = buildParams.isCi()
     tags = generateTags(base, architecture)
     platforms.add(architecture.dockerPlatform)
@@ -485,7 +484,7 @@ void addBuildEssDockerImageTask(Architecture architecture) {
     dockerContext.fileProvider(buildContextTask.map { it.getDestinationDir() })
 
-    noCache = BuildParams.isCi()
+    noCache = buildParams.isCi()
     baseImages = []
     tags = generateTags(dockerBase, architecture)
     platforms.add(architecture.dockerPlatform)
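The buildDate expansion above pins non-CI builds to midnight so Docker layer caching stays warm between local rebuilds. Concretely, as a small Java illustration (the input instant is made up):

    import java.time.Instant;
    import java.time.temporal.ChronoUnit;

    Instant now = Instant.parse("2024-11-12T14:31:07Z");
    // Outside CI the timestamp is truncated to the day, so repeated local
    // builds on the same day produce an identical build_date label:
    String buildDate = now.truncatedTo(ChronoUnit.DAYS).toString(); // 2024-11-12T00:00:00Z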
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index e08f16c14ab88..918980fea616a 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -301,7 +301,7 @@ ospackage {
   url 'https://www.elastic.co/'
 
   // signing setup
-  if (project.hasProperty('signing.password') && BuildParams.isSnapshotBuild() == false) {
+  if (project.hasProperty('signing.password') && buildParams.isSnapshotBuild() == false) {
     signingKeyId = project.hasProperty('signing.keyId') ? project.property('signing.keyId') : 'D88E42B4'
     signingKeyPassphrase = project.property('signing.password')
     signingKeyRingFile = project.hasProperty('signing.secretKeyRingFile') ?
diff --git a/docs/build.gradle b/docs/build.gradle
index e495ecacce27b..dec0de8ffa844 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -1,5 +1,4 @@
 import org.elasticsearch.gradle.Version
-import org.elasticsearch.gradle.internal.info.BuildParams
 import org.elasticsearch.gradle.internal.doc.DocSnippetTask
 
 import static org.elasticsearch.gradle.testclusters.TestDistribution.DEFAULT
@@ -29,7 +28,7 @@ ext.docsFileTree = fileTree(projectDir) {
   // These files simply don't pass yet. We should figure out how to fix them.
   exclude 'reference/watcher/reference/actions.asciidoc'
   exclude 'reference/rest-api/security/ssl.asciidoc'
-  if (BuildParams.inFipsJvm) {
+  if (buildParams.inFipsJvm) {
     // We don't support this component in FIPS 140
     exclude 'reference/ingest/processors/attachment.asciidoc'
     // We can't conditionally control output, this would be missing the ingest-attachment component
@@ -38,7 +37,7 @@
 }
 
 tasks.named("yamlRestTest") {
-  if (BuildParams.isSnapshotBuild() == false) {
+  if (buildParams.isSnapshotBuild() == false) {
     // LOOKUP is not available in snapshots
     systemProperty 'tests.rest.blacklist', [
       "reference/esql/processing-commands/lookup/esql-lookup-example"
@@ -83,7 +82,7 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {
   setting 'xpack.license.self_generated.type', 'trial'
   setting 'indices.lifecycle.history_index_enabled', 'false'
   keystorePassword 'keystore-password'
-  if (BuildParams.isSnapshotBuild() == false) {
+  if (buildParams.isSnapshotBuild() == false) {
     requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
   }
 }
@@ -170,7 +169,7 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {
       return
     }
     // Do not install ingest-attachment in a FIPS 140 JVM as this is not supported
-    if (subproj.path.startsWith(':modules:ingest-attachment') && BuildParams.inFipsJvm) {
+    if (subproj.path.startsWith(':modules:ingest-attachment') && buildParams.inFipsJvm) {
       return
     }
     plugin subproj.path
diff --git a/docs/changelog/104683.yaml b/docs/changelog/104683.yaml
new file mode 100644
index 0000000000000..d4f40b59cfd91
--- /dev/null
+++ b/docs/changelog/104683.yaml
@@ -0,0 +1,5 @@
+pr: 104683
+summary: "Feature: re-structure document ID generation favoring _id inverted index compression"
+area: Logs
+type: enhancement
+issues: []
diff --git a/docs/changelog/106520.yaml b/docs/changelog/106520.yaml
deleted file mode 100644
index c3fe69a4c3dbd..0000000000000
--- a/docs/changelog/106520.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 106520
-summary: Updated the transport CA name in Security Auto-Configuration.
-area: Security
-type: bug
-issues:
- - 106455
diff --git a/docs/changelog/107047.yaml b/docs/changelog/107047.yaml
deleted file mode 100644
index 89caed6f55074..0000000000000
--- a/docs/changelog/107047.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107047
-summary: "Search/Mapping: KnnVectorQueryBuilder support for allowUnmappedFields"
-area: Search
-type: bug
-issues:
- - 106846
diff --git a/docs/changelog/107936.yaml b/docs/changelog/107936.yaml
deleted file mode 100644
index 89dd57f7a81a5..0000000000000
--- a/docs/changelog/107936.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107936
-summary: Two empty mappings now are created equally
-area: Mapping
-type: bug
-issues:
- - 107031
diff --git a/docs/changelog/109017.yaml b/docs/changelog/109017.yaml
deleted file mode 100644
index 80bcdd6fc0e25..0000000000000
--- a/docs/changelog/109017.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109017
-summary: "ESQL: Add `MV_PSERIES_WEIGHTED_SUM` for score calculations used by security\
-  \ solution"
-area: ES|QL
-type: "feature"
-issues: [ ]
diff --git a/docs/changelog/109193.yaml b/docs/changelog/109193.yaml
deleted file mode 100644
index 5cc664eaee2cd..0000000000000
--- a/docs/changelog/109193.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109193
-summary: "[ES|QL] explicit cast a string literal to `date_period` and `time_duration`\
-  \ in arithmetic operations"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/109414.yaml b/docs/changelog/109414.yaml
deleted file mode 100644
index 81b7541bde35b..0000000000000
--- a/docs/changelog/109414.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109414
-summary: Don't fail retention lease sync actions due to capacity constraints
-area: CRUD
-type: bug
-issues:
- - 105926
diff --git a/docs/changelog/109583.yaml b/docs/changelog/109583.yaml
deleted file mode 100644
index 84757e307b4fb..0000000000000
--- a/docs/changelog/109583.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-pr: 109583
-summary: "ESQL: INLINESTATS"
-area: ES|QL
-type: feature
-issues:
- - 107589
-highlight:
-  title: "ESQL: INLINESTATS"
-  body: |-
-    This adds the `INLINESTATS` command to ESQL which performs a STATS and
-    then enriches the results into the output stream.
-    So, this query:
-
-    [source,esql]
-    ----
-    FROM test
-    | INLINESTATS m=MAX(a * b) BY b
-    | WHERE m == a * b
-    | SORT a DESC, b DESC
-    | LIMIT 3
-    ----
-
-    Produces output like:
-
-    | a   | b   | m     |
-    | --- | --- | ----- |
-    | 99  | 999 | 98901 |
-    | 99  | 998 | 98802 |
-    | 99  | 997 | 98703 |
-  notable: true
diff --git a/docs/changelog/109667.yaml b/docs/changelog/109667.yaml
deleted file mode 100644
index 782a1b1cf6c9b..0000000000000
--- a/docs/changelog/109667.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109667
-summary: Inference autoscaling
-area: Machine Learning
-type: feature
-issues: []
diff --git a/docs/changelog/109684.yaml b/docs/changelog/109684.yaml
deleted file mode 100644
index 156f568290cf5..0000000000000
--- a/docs/changelog/109684.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109684
-summary: Avoid `ModelAssignment` deadlock
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/110021.yaml b/docs/changelog/110021.yaml
deleted file mode 100644
index 51878b960dfd0..0000000000000
--- a/docs/changelog/110021.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110021
-summary: "[ES|QL] validate `mv_sort` order"
-area: ES|QL
-type: bug
-issues:
- - 109910
diff --git a/docs/changelog/110116.yaml b/docs/changelog/110116.yaml
deleted file mode 100644
index 9c309b8b80311..0000000000000
--- a/docs/changelog/110116.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110116
-summary: "[ESQL] Make query wrapped by `SingleValueQuery` cacheable"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/110216.yaml b/docs/changelog/110216.yaml
deleted file mode 100644
index 00ab20b230e2c..0000000000000
--- a/docs/changelog/110216.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110216
-summary: Register SLM run before snapshotting to save stats
-area: ILM+SLM
-type: enhancement
-issues: []
diff --git a/docs/changelog/110237.yaml b/docs/changelog/110237.yaml
deleted file mode 100644
index 076855385376c..0000000000000
--- a/docs/changelog/110237.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 110237
-summary: Optimize the loop processing of URL decoding
-area: Infra/REST API
-type: enhancement
-issues:
- - 110235
-
diff --git a/docs/changelog/110399.yaml b/docs/changelog/110399.yaml
deleted file mode 100644
index 9e04e2656809e..0000000000000
--- a/docs/changelog/110399.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110399
-summary: "[Inference API] Prevent inference endpoints from being deleted if they are\
-  \ referenced by semantic text"
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/110427.yaml b/docs/changelog/110427.yaml
deleted file mode 100644
index ba8a1246e90e4..0000000000000
--- a/docs/changelog/110427.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110427
-summary: "[Inference API] Remove unused Cohere rerank service settings fields in a\
-  \ BWC way"
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/110520.yaml b/docs/changelog/110520.yaml
deleted file mode 100644
index fba4b84e2279e..0000000000000
--- a/docs/changelog/110520.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110520
-summary: Add protection for OOM during aggregations partial reduction
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/110524.yaml b/docs/changelog/110524.yaml
deleted file mode 100644
index 6274c99b09998..0000000000000
--- a/docs/changelog/110524.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110524
-summary: Introduce mode `subobjects=auto` for objects
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/110527.yaml b/docs/changelog/110527.yaml
deleted file mode 100644
index 3ab19ecaaaa76..0000000000000
--- a/docs/changelog/110527.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110527
-summary: "ESQL: Add boolean support to Max and Min aggs"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/110554.yaml b/docs/changelog/110554.yaml
deleted file mode 100644
index 8c0b896a4c979..0000000000000
--- a/docs/changelog/110554.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110554
-summary: Fix `MapperBuilderContext#isDataStream` when used in dynamic mappers
-area: "Mapping"
-type: bug
-issues: []
diff --git a/docs/changelog/110574.yaml b/docs/changelog/110574.yaml
deleted file mode 100644
index 1840838500151..0000000000000
--- a/docs/changelog/110574.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110574
-summary: "ES|QL: better validation for GROK patterns"
-area: ES|QL
-type: bug
-issues:
- - 110533
diff --git a/docs/changelog/110578.yaml b/docs/changelog/110578.yaml
deleted file mode 100644
index 5d48171e4f328..0000000000000
--- a/docs/changelog/110578.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110578
-summary: Add `size_in_bytes` to enrich cache stats
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/110593.yaml b/docs/changelog/110593.yaml
deleted file mode 100644
index 21a5d426ceb46..0000000000000
--- a/docs/changelog/110593.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110593
-summary: "[ES|QL] add tests for stats by constant"
-area: ES|QL
-type: bug
-issues:
- - 105383
diff --git a/docs/changelog/110603.yaml b/docs/changelog/110603.yaml
deleted file mode 100644
index 4ba19985853df..0000000000000
--- a/docs/changelog/110603.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110603
-summary: Stop iterating over all fields to extract @timestamp value
-area: TSDB
-type: enhancement
-issues:
- - 92297
diff --git a/docs/changelog/110606.yaml b/docs/changelog/110606.yaml
deleted file mode 100644
index d4ab5234289c4..0000000000000
--- a/docs/changelog/110606.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110606
-summary: Adding mapping validation to the simulate ingest API
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/110630.yaml b/docs/changelog/110630.yaml
deleted file mode 100644
index 9bf78e1209753..0000000000000
--- a/docs/changelog/110630.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110630
-summary: Telemetry for inference adaptive allocations
-area: Machine Learning
-type: feature
-issues: []
diff --git a/docs/changelog/110633.yaml b/docs/changelog/110633.yaml
deleted file mode 100644
index d4d1dc68cdbcc..0000000000000
--- a/docs/changelog/110633.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110633
-summary: Add manage roles privilege
-area: Authorization
-type: enhancement
-issues: []
diff --git a/docs/changelog/110669.yaml b/docs/changelog/110669.yaml
deleted file mode 100644
index 301e756ca373c..0000000000000
--- a/docs/changelog/110669.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110669
-summary: "[ES|QL] Use `RangeQuery` and String in `BinaryComparison` on datetime fields"
-area: ES|QL
-type: bug
-issues:
- - 107900
diff --git a/docs/changelog/110676.yaml b/docs/changelog/110676.yaml
deleted file mode 100644
index efe7e0e55f18f..0000000000000
--- a/docs/changelog/110676.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110676
-summary: Allow querying `index_mode`
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/110677.yaml b/docs/changelog/110677.yaml
deleted file mode 100644
index 72fe5129f3b9d..0000000000000
--- a/docs/changelog/110677.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110677
-summary: Add validation for synthetic source mode in logs mode indices
-area: Logs
-type: enhancement
-issues: []
diff --git a/docs/changelog/110718.yaml b/docs/changelog/110718.yaml
deleted file mode 100644
index 526083a8add0c..0000000000000
--- a/docs/changelog/110718.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110718
-summary: "ESQL: Add boolean support to TOP aggregation"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/110734.yaml b/docs/changelog/110734.yaml
deleted file mode 100644
index d6dce144b89cd..0000000000000
--- a/docs/changelog/110734.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110734
-summary: Fix bug in ML serverless autoscaling which prevented trained model updates from triggering a scale up
-area: Machine Learning
-type: bug
-issues: [ ]
diff --git a/docs/changelog/110796.yaml b/docs/changelog/110796.yaml
deleted file mode 100644
index a54a9a08bbd27..0000000000000
--- a/docs/changelog/110796.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110796
-summary: Remove needless forking to GENERIC in `TransportMultiSearchAction`
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/110816.yaml b/docs/changelog/110816.yaml
deleted file mode 100644
index bf707376ec9ea..0000000000000
--- a/docs/changelog/110816.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110816
-summary: GET _cluster/settings with include_defaults returns the expected fallback value if defined in elasticsearch.yml
-area: Infra/Settings
-type: bug
-issues:
- - 110815
diff --git a/docs/changelog/110829.yaml b/docs/changelog/110829.yaml
deleted file mode 100644
index 365a14436ec89..0000000000000
--- a/docs/changelog/110829.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-pr: 110829
-summary: deprecate `edge_ngram` side parameter
-area: Analysis
-type: deprecation
-issues: []
-deprecation:
-  title: deprecate `edge_ngram` side parameter
-  area: Analysis
-  details: edge_ngram will no longer accept the side parameter.
-  impact: Users will need to update any usage of edge_ngram token filter that utilizes `side`. If the `back` value was used, they can achieve the same behavior by using the `reverse` token filter.
diff --git a/docs/changelog/110833.yaml b/docs/changelog/110833.yaml
deleted file mode 100644
index 008fc489ed731..0000000000000
--- a/docs/changelog/110833.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110833
-summary: Make empty string searches be consistent with case (in)sensitivity
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/110846.yaml b/docs/changelog/110846.yaml
deleted file mode 100644
index 56cc65e83648c..0000000000000
--- a/docs/changelog/110846.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110846
-summary: Fix MLTQuery handling of custom term frequencies
-area: Ranking
-type: bug
-issues: []
diff --git a/docs/changelog/110847.yaml b/docs/changelog/110847.yaml
deleted file mode 100644
index 214adc97ac7cb..0000000000000
--- a/docs/changelog/110847.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110847
-summary: SLM Interval based scheduling
-area: ILM+SLM
-type: feature
-issues: []
diff --git a/docs/changelog/110860.yaml b/docs/changelog/110860.yaml
deleted file mode 100644
index 5649ca4c88362..0000000000000
--- a/docs/changelog/110860.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110860
-summary: Speedup `CanMatchPreFilterSearchPhase` constructor
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/110879.yaml b/docs/changelog/110879.yaml
deleted file mode 100644
index d114c6c2aa472..0000000000000
--- a/docs/changelog/110879.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110879
-summary: Add EXP ES|QL function
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/110901.yaml b/docs/changelog/110901.yaml
deleted file mode 100644
index 599cb7ce9ec98..0000000000000
--- a/docs/changelog/110901.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-pr: 110901
-summary: Set lenient to true by default when using updateable synonyms
-area: Analysis
-type: breaking
-issues: []
-breaking:
-  title: Set lenient to true by default when using updateable synonyms
-  area: Analysis
-  details: |
-    When a `synonym` or `synonym_graph` token filter is configured with `updateable: true`, the default `lenient`
-    value will now be `true`.
-  impact: |
-    `synonym` or `synonym_graph` token filters configured with `updateable: true` will ignore invalid synonyms by
-    default. This prevents shard initialization errors on invalid synonyms.
-notable: true
diff --git a/docs/changelog/110921.yaml b/docs/changelog/110921.yaml
deleted file mode 100644
index 28cd569404945..0000000000000
--- a/docs/changelog/110921.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110921
-summary: "ESQL: Support IP fields in MAX and MIN aggregations"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/110928.yaml b/docs/changelog/110928.yaml
deleted file mode 100644
index dcb2df6e6cca9..0000000000000
--- a/docs/changelog/110928.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110928
-summary: Dense vector field types updatable for int4
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/110951.yaml b/docs/changelog/110951.yaml
deleted file mode 100644
index ec8bc9cae6347..0000000000000
--- a/docs/changelog/110951.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110951
-summary: Allow task canceling of validate API calls
-area: Transform
-type: bug
-issues: []
diff --git a/docs/changelog/110971.yaml b/docs/changelog/110971.yaml
deleted file mode 100644
index 3579f77dc0d1d..0000000000000
--- a/docs/changelog/110971.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110971
-summary: "Search in ES|QL: Add MATCH operator"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/110974.yaml b/docs/changelog/110974.yaml
deleted file mode 100644
index c9e8c9b78675e..0000000000000
--- a/docs/changelog/110974.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110974
-summary: Add custom rule parameters to force time shift
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/110986.yaml b/docs/changelog/110986.yaml
deleted file mode 100644
index 4e320b19c9578..0000000000000
--- a/docs/changelog/110986.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110986
-summary: Fix unnecessary mustache template evaluation
-area: Ingest Node
-type: enhancement
-issues:
- - 110191
diff --git a/docs/changelog/110993.yaml b/docs/changelog/110993.yaml
deleted file mode 100644
index 9eb653a09e3a4..0000000000000
--- a/docs/changelog/110993.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110993
-summary: Add link to Max Shards Per Node exception message
-area: Distributed
-type: enhancement
-issues: []
diff --git a/docs/changelog/111015.yaml b/docs/changelog/111015.yaml
deleted file mode 100644
index 3cc363c8bbf6b..0000000000000
--- a/docs/changelog/111015.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-pr: 111015
-summary: Always allow rebalancing by default
-area: Allocation
-type: enhancement
-issues: []
-highlight:
-  title: Always allow rebalancing by default
-  body: |-
-    In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to
-    `indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was
-    appropriate for the legacy allocator which might do too many rebalancing moves otherwise. Today's allocator has
-    better support for rebalancing a cluster that is not in `green` health, and expects to be able to rebalance some
-    shards away from over-full nodes to avoid allocating shards to undesirable locations in the first place. From
-    version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy allocator is explicitly enabled.
-  notable: true
diff --git a/docs/changelog/111064.yaml b/docs/changelog/111064.yaml
deleted file mode 100644
index 848da842b090e..0000000000000
--- a/docs/changelog/111064.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111064
-summary: "ESQL: Fix Double operations returning infinite"
-area: ES|QL
-type: bug
-issues:
- - 111026
diff --git a/docs/changelog/111071.yaml b/docs/changelog/111071.yaml
deleted file mode 100644
index 5e8ab53db3d03..0000000000000
--- a/docs/changelog/111071.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111071
-summary: Use native scalar scorer for int8_flat index
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/111079.yaml b/docs/changelog/111079.yaml
deleted file mode 100644
index aac22005f912d..0000000000000
--- a/docs/changelog/111079.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111079
-summary: PUT slm policy should only increase version if actually changed
-area: ILM+SLM
-type: enhancement
-issues: []
diff --git a/docs/changelog/111091.yaml b/docs/changelog/111091.yaml
deleted file mode 100644
index 8444681a14a48..0000000000000
--- a/docs/changelog/111091.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111091
-summary: "X-pack/plugin/otel: introduce x-pack-otel plugin"
-area: Data streams
-type: feature
-issues: []
diff --git a/docs/changelog/111105.yaml b/docs/changelog/111105.yaml
deleted file mode 100644
index ed32bd1ef7fc3..0000000000000
--- a/docs/changelog/111105.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111105
-summary: "ESQL: TOP aggregation IP support"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/111118.yaml b/docs/changelog/111118.yaml
deleted file mode 100644
index c9fe6cb443688..0000000000000
--- a/docs/changelog/111118.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111118
-summary: "[ES|QL] Simplify patterns for subfields"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/111123.yaml b/docs/changelog/111123.yaml
deleted file mode 100644
index 605b8607f4082..0000000000000
--- a/docs/changelog/111123.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111123
-summary: Add Lucene segment-level fields stats
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/111154.yaml b/docs/changelog/111154.yaml
deleted file mode 100644
index 3297f5005a811..0000000000000
--- a/docs/changelog/111154.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111154
-summary: EIS integration
-area: Inference
-type: feature
-issues: []
diff --git a/docs/changelog/111161.yaml b/docs/changelog/111161.yaml
deleted file mode 100644
index c081d555ff1ee..0000000000000
--- a/docs/changelog/111161.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111161
-summary: Add support for templates when validating mappings in the simulate ingest
-  API
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/111181.yaml b/docs/changelog/111181.yaml
deleted file mode 100644
index 7f9f5937b7652..0000000000000
--- a/docs/changelog/111181.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111181
-summary: "[Inference API] Add Alibaba Cloud AI Search Model support to Inference API"
-area: Machine Learning
-type: enhancement
-issues: [ ]
diff --git a/docs/changelog/111193.yaml b/docs/changelog/111193.yaml
deleted file mode 100644
index 9e56facb60d3a..0000000000000
--- a/docs/changelog/111193.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111193
-summary: Fix cases of collections with one point
-area: Geo
-type: bug
-issues:
- - 110982
diff --git a/docs/changelog/111212.yaml b/docs/changelog/111212.yaml
deleted file mode 100644
index 67d1513b3ff6f..0000000000000
--- a/docs/changelog/111212.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111212
-summary: Fix score count validation in reranker response
-area: Ranking
-type: bug
-issues:
- - 111202
diff --git a/docs/changelog/111215.yaml b/docs/changelog/111215.yaml
deleted file mode 100644
index dc044c2283fc4..0000000000000
--- a/docs/changelog/111215.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111215
-summary: Make `SnapshotLifecycleStats` immutable so `SnapshotLifecycleMetadata.EMPTY`
-  isn't changed as side-effect
-area: ILM+SLM
-type: bug
-issues: []
diff --git a/docs/changelog/111225.yaml b/docs/changelog/111225.yaml
deleted file mode 100644
index bcd344847cfd2..0000000000000
--- a/docs/changelog/111225.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111225
-summary: Upgrade Azure SDK
-area: Snapshot/Restore
-type: upgrade
-issues: []
diff --git a/docs/changelog/111226.yaml b/docs/changelog/111226.yaml
deleted file mode 100644
index 1021a26fa789f..0000000000000
--- a/docs/changelog/111226.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111226
-summary: "ES|QL: add Telemetry API and track top functions"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/111238.yaml b/docs/changelog/111238.yaml
deleted file mode 100644
index b918b754ff595..0000000000000
--- a/docs/changelog/111238.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111238
-summary: Fix validation of TEXT fields with case insensitive comparison
-area: EQL
-type: bug
-issues:
- - 111235
diff --git a/docs/changelog/111245.yaml b/docs/changelog/111245.yaml
deleted file mode 100644
index 384373d52cb20..0000000000000
--- a/docs/changelog/111245.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111245
-summary: Truncating watcher history if it is too large
-area: Watcher
-type: bug
-issues:
- - 94745
diff --git a/docs/changelog/111274.yaml b/docs/changelog/111274.yaml
deleted file mode 100644
index e26bcc03ce118..0000000000000
--- a/docs/changelog/111274.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111274
-summary: Include account name in Azure settings exceptions
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/111284.yaml b/docs/changelog/111284.yaml
deleted file mode 100644
index f87649a134af6..0000000000000
--- a/docs/changelog/111284.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111284
-summary: Update `semantic_text` field to support indexing numeric and boolean data
-  types
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/111311.yaml b/docs/changelog/111311.yaml
deleted file mode 100644
index 5786e11e885e2..0000000000000
--- a/docs/changelog/111311.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111311
-summary: Adding support for data streams with a match-all template
-area: Data streams
-type: bug
-issues:
- - 111204
diff --git a/docs/changelog/111315.yaml b/docs/changelog/111315.yaml
deleted file mode 100644
index 0e2e56898b51c..0000000000000
--- a/docs/changelog/111315.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111315
-summary: Add link to flood-stage watermark exception message
-area: Allocation
-type: enhancement
-issues: []
diff --git a/docs/changelog/111316.yaml b/docs/changelog/111316.yaml
deleted file mode 100644
index 0d915cd1ec3ea..0000000000000
--- a/docs/changelog/111316.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111316
-summary: "[Service Account] Add `AutoOps` account"
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/111336.yaml b/docs/changelog/111336.yaml
deleted file mode 100644
index d5bf602cb7a88..0000000000000
--- a/docs/changelog/111336.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111336
-summary: Use the same chunking configurations for models in the Elasticsearch service
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/111344.yaml b/docs/changelog/111344.yaml
deleted file mode 100644
index 3d5988054749d..0000000000000
--- a/docs/changelog/111344.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111344
-summary: Add support for Azure Managed Identity
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/111367.yaml b/docs/changelog/111367.yaml
deleted file mode 100644
index 89e6c1d3b4da4..0000000000000
--- a/docs/changelog/111367.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111367
-summary: "ESQL: Add Values aggregation tests, fix `ConstantBytesRefBlock` memory handling"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/111412.yaml b/docs/changelog/111412.yaml
deleted file mode 100644
index 297fa77cd2664..0000000000000
--- a/docs/changelog/111412.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111412
-summary: Make enrich cache based on memory usage
-area: Ingest Node
-type: enhancement
-issues:
- - 106081
diff --git a/docs/changelog/111413.yaml b/docs/changelog/111413.yaml
deleted file mode 100644
index 0eae45b17d0c4..0000000000000
--- a/docs/changelog/111413.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111413
-summary: "ESQL: Fix synthetic attribute pruning"
-area: ES|QL
-type: bug
-issues:
- - 105821
diff --git a/docs/changelog/111420.yaml b/docs/changelog/111420.yaml
deleted file mode 100644
index 4e2640ac5762a..0000000000000
--- a/docs/changelog/111420.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111420
-summary: "[Query rules] Add `exclude` query rule type"
-area: Relevance
-type: feature
-issues: []
diff --git a/docs/changelog/111437.yaml b/docs/changelog/111437.yaml
deleted file mode 100644
index a50312ffdd1aa..0000000000000
--- a/docs/changelog/111437.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111437
-summary: "[ES|QL] Create `Range` in `PushFiltersToSource` for qualified pushable filters on the same field"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/111445.yaml b/docs/changelog/111445.yaml
deleted file mode 100644
index 9ba8e4371bd0c..0000000000000
--- a/docs/changelog/111445.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111445
-summary: Support booleans in routing path
-area: TSDB
-type: enhancement
-issues: []
diff --git a/docs/changelog/111457.yaml b/docs/changelog/111457.yaml
deleted file mode 100644
index f4ad4ee53eb0a..0000000000000
--- a/docs/changelog/111457.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111457
-summary: Add support for boolean dimensions
-area: TSDB
-type: enhancement
-issues:
- - 111338
diff --git a/docs/changelog/111465.yaml b/docs/changelog/111465.yaml
deleted file mode 100644
index 2a8df287427a9..0000000000000
--- a/docs/changelog/111465.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111465
-summary: Add range and regexp Intervals
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/111490.yaml b/docs/changelog/111490.yaml
deleted file mode 100644
index b67c16189cc62..0000000000000
--- a/docs/changelog/111490.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111490
-summary: Temporarily return both `modelId` and `inferenceId` for GET /_inference until we migrate clients to only `inferenceId`
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/111501.yaml b/docs/changelog/111501.yaml
deleted file mode 100644
index a424142376e52..0000000000000
--- a/docs/changelog/111501.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111501
-summary: "[ES|QL] Combine Disjunctive CIDRMatch"
-area: ES|QL
-type: enhancement
-issues:
- - 105143
diff --git a/docs/changelog/111516.yaml b/docs/changelog/111516.yaml
deleted file mode 100644
index 96e8bd843f750..0000000000000
--- a/docs/changelog/111516.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111516
-summary: Adding support for `allow_partial_search_results` in PIT
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/111523.yaml b/docs/changelog/111523.yaml
deleted file mode 100644
index 202d16c5a426d..0000000000000
--- a/docs/changelog/111523.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111523
-summary: Search coordinator uses `event.ingested` in cluster state to do rewrites
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/111544.yaml b/docs/changelog/111544.yaml
deleted file mode 100644
index d4c46f485e664..0000000000000
--- a/docs/changelog/111544.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111544
-summary: "ESQL: Strings support for MAX and MIN aggregations"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/111552.yaml b/docs/changelog/111552.yaml
deleted file mode 100644
index d9991788d4fa9..0000000000000
--- a/docs/changelog/111552.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111552
-summary: Siem ea 9521 improve test
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/111576.yaml b/docs/changelog/111576.yaml
deleted file mode 100644
index 6d3c331f4bbd5..0000000000000
--- a/docs/changelog/111576.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111576
-summary: Execute shard snapshot tasks in shard-id order
-area: Snapshot/Restore
-type: enhancement
-issues:
- - 108739
diff --git a/docs/changelog/111600.yaml b/docs/changelog/111600.yaml
deleted file mode 100644
index 0c1e01e1c2e23..0000000000000
--- a/docs/changelog/111600.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111600
-summary: Make ecs@mappings work with OTel attributes
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/111624.yaml b/docs/changelog/111624.yaml
deleted file mode 100644
index 7b04b244ef7a7..0000000000000
--- a/docs/changelog/111624.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111624
-summary: Extend logging for dropped warning headers
-area: Infra/Core
-type: enhancement
-issues:
- - 90527
diff --git a/docs/changelog/111644.yaml b/docs/changelog/111644.yaml
deleted file mode 100644
index 3705d697c95e3..0000000000000
--- a/docs/changelog/111644.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111644
-summary: Force using the last centroid during merging
-area: Aggregations
-type: bug
-issues:
- - 111065
diff --git a/docs/changelog/111655.yaml b/docs/changelog/111655.yaml
deleted file mode 100644
index 077714d15a712..0000000000000
--- a/docs/changelog/111655.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111655
-summary: Migrate Inference to `ChunkedToXContent`
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/111683.yaml b/docs/changelog/111683.yaml
deleted file mode 100644
index cbb2e5ad71ddc..0000000000000
--- a/docs/changelog/111683.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111683
-summary: Only emit product origin in deprecation log if present
-area: Infra/Logging
-type: bug
-issues:
- - 81757
diff --git a/docs/changelog/111689.yaml b/docs/changelog/111689.yaml
deleted file mode 100644
index ccb3d4d4f87c5..0000000000000
--- a/docs/changelog/111689.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111689
-summary: Add nanos support to `ZonedDateTime` serialization
-area: Infra/Core
-type: enhancement
-issues:
- - 68292
diff --git a/docs/changelog/111690.yaml b/docs/changelog/111690.yaml
deleted file mode 100644
index 36e715744ad88..0000000000000
--- a/docs/changelog/111690.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111690
-summary: "ESQL: Support INLINESTATS grouped on expressions"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/111740.yaml b/docs/changelog/111740.yaml
deleted file mode 100644
index 48b7ee200e45e..0000000000000
--- a/docs/changelog/111740.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111740
-summary: Fix Start Trial API output acknowledgement header for features
-area: License
-type: bug
-issues:
- - 111739
diff --git a/docs/changelog/111749.yaml b/docs/changelog/111749.yaml
deleted file mode 100644
index 77e0c65005dd6..0000000000000
--- a/docs/changelog/111749.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111749
-summary: "ESQL: Added `mv_percentile` function"
-area: ES|QL
-type: feature
-issues:
- - 111591
diff --git a/docs/changelog/111770.yaml b/docs/changelog/111770.yaml
deleted file mode 100644
index 8d6bde6b25ef9..0000000000000
--- a/docs/changelog/111770.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111770
-summary: Integrate IBM watsonx to Inference API for text embeddings
-area: Experiences
-type: enhancement
-issues: []
diff --git a/docs/changelog/111779.yaml b/docs/changelog/111779.yaml
deleted file mode 100644
index 52c635490e1e4..0000000000000
--- a/docs/changelog/111779.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 111779
-summary: "ESQL: Fix serialization during `can_match`"
-area: ES|QL
-type: bug
-issues:
- - 111701
- - 111726
diff --git a/docs/changelog/111797.yaml b/docs/changelog/111797.yaml
deleted file mode 100644
index 00b793a19d9c3..0000000000000
--- a/docs/changelog/111797.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111797
-summary: "ESQL: fix for missing indices error message"
-area: ES|QL
-type: bug
-issues:
- - 111712
diff --git a/docs/changelog/111809.yaml b/docs/changelog/111809.yaml
deleted file mode 100644
index 5a2f220e3a697..0000000000000
--- a/docs/changelog/111809.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111809
-summary: Add Field caps support for Semantic Text
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/111818.yaml b/docs/changelog/111818.yaml
deleted file mode 100644
index c3a632861aae6..0000000000000
--- a/docs/changelog/111818.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111818
-summary: Add tier preference to security index settings allowlist
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/111840.yaml b/docs/changelog/111840.yaml
deleted file mode 100644
index c40a9e2aef621..0000000000000
--- a/docs/changelog/111840.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111840
-summary: "ESQL: Add async ID and `is_running` headers to ESQL async query"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/111855.yaml b/docs/changelog/111855.yaml
deleted file mode 100644
index 3f15e9c20135a..0000000000000
--- a/docs/changelog/111855.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111855
-summary: "ESQL: Profile more timing information"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/111874.yaml b/docs/changelog/111874.yaml
deleted file mode 100644
index 26ec90aa6cd4c..0000000000000
--- a/docs/changelog/111874.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-pr: 111874
-summary: "ESQL: BUCKET: allow numerical spans as whole numbers"
-area: ES|QL
-type: enhancement
-issues:
- - 104646
- - 109340
- - 105375
diff --git a/docs/changelog/111879.yaml b/docs/changelog/111879.yaml
deleted file mode 100644
index b8c2111e1d286..0000000000000
--- a/docs/changelog/111879.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111879
-summary: "ESQL: Have BUCKET generate friendlier intervals"
-area: ES|QL
-type: enhancement
-issues:
- - 110916
diff --git a/docs/changelog/111915.yaml b/docs/changelog/111915.yaml
deleted file mode 100644
index f64c45b82d10c..0000000000000
--- a/docs/changelog/111915.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111915
-summary: Fix DLS & FLS sometimes being enforced when it is disabled
-area: Authorization
-type: bug
-issues:
- - 94709
diff --git a/docs/changelog/111917.yaml b/docs/changelog/111917.yaml
deleted file mode 100644
index 0dc760d76a698..0000000000000
--- a/docs/changelog/111917.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 111917
-summary: "[ES|QL] Cast mixed numeric types to a common numeric type for Coalesce and\
-  \ In at Analyzer"
-area: ES|QL
-type: enhancement
-issues:
- - 111486
diff --git a/docs/changelog/111937.yaml b/docs/changelog/111937.yaml
deleted file mode 100644
index 7d856e29d54c5..0000000000000
--- a/docs/changelog/111937.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111937
-summary: Handle `BigInteger` in xcontent copy
-area: Infra/Core
-type: bug
-issues:
- - 111812
diff --git a/docs/changelog/111948.yaml b/docs/changelog/111948.yaml
deleted file mode 100644
index a3a592abaf1ca..0000000000000
--- a/docs/changelog/111948.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111948
-summary: Upgrade xcontent to Jackson 2.17.0
-area: Infra/Core
-type: upgrade
-issues: []
diff --git a/docs/changelog/111950.yaml b/docs/changelog/111950.yaml
deleted file mode 100644
index 3f23c17d8e652..0000000000000
--- a/docs/changelog/111950.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111950
-summary: "[ES|QL] Name parameter with leading underscore"
-area: ES|QL
-type: enhancement
-issues:
- - 111821
diff --git a/docs/changelog/111955.yaml b/docs/changelog/111955.yaml
deleted file mode 100644
index ebc518203b7cc..0000000000000
--- a/docs/changelog/111955.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 111955
-summary: Clean up dangling S3 multipart uploads
-area: Snapshot/Restore
-type: enhancement
-issues:
- - 101169
- - 44971
diff --git a/docs/changelog/111968.yaml b/docs/changelog/111968.yaml
deleted file mode 100644
index 9d758c76369e9..0000000000000
--- a/docs/changelog/111968.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 111968
-summary: "ESQL: don't lose the original casting error message"
-area: ES|QL
-type: bug
-issues:
- - 111967
diff --git a/docs/changelog/111969.yaml b/docs/changelog/111969.yaml
deleted file mode 100644
index 2d276850c4988..0000000000000
--- a/docs/changelog/111969.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 111969
-summary: "[Profiling] add `container.id` field to event index template"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/111972.yaml b/docs/changelog/111972.yaml
deleted file mode 100644
index a5bfcd5b0882e..0000000000000
--- a/docs/changelog/111972.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-pr: 111972
-summary: Introduce global retention in data stream lifecycle.
-area: Data streams
-type: feature
-issues: []
-highlight:
-  title: Add global retention in data stream lifecycle
-  body: |-
-    Data stream lifecycle now supports configuring retention on a cluster level,
-    namely global retention. Global retention \nallows us to configure two different
-    retentions:
-
-    - `data_streams.lifecycle.retention.default` is applied to all data streams managed
-    by the data stream lifecycle that do not have retention defined on the data stream level.
- - `data_streams.lifecycle.retention.max` is applied to all data streams managed by the - data stream lifecycle and it allows any data stream data to be deleted after the `max_retention` has passed. - notable: true diff --git a/docs/changelog/111981.yaml b/docs/changelog/111981.yaml deleted file mode 100644 index 13b8fe4b7e38d..0000000000000 --- a/docs/changelog/111981.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111981 -summary: Allow fields with dots in sparse vector field mapper -area: Mapping -type: enhancement -issues: - - 109118 diff --git a/docs/changelog/112019.yaml b/docs/changelog/112019.yaml deleted file mode 100644 index 7afb207864ed7..0000000000000 --- a/docs/changelog/112019.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112019 -summary: Display effective retention in the relevant data stream APIs -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/112024.yaml b/docs/changelog/112024.yaml deleted file mode 100644 index e426693fba964..0000000000000 --- a/docs/changelog/112024.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112024 -summary: (API) Cluster Health report `unassigned_primary_shards` -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/112026.yaml b/docs/changelog/112026.yaml deleted file mode 100644 index fedf001923ab4..0000000000000 --- a/docs/changelog/112026.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112026 -summary: Create `StreamingHttpResultPublisher` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112055.yaml b/docs/changelog/112055.yaml deleted file mode 100644 index cdf15b3b37468..0000000000000 --- a/docs/changelog/112055.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112055 -summary: "ESQL: `mv_median_absolute_deviation` function" -area: ES|QL -type: feature -issues: - - 111590 diff --git a/docs/changelog/112058.yaml b/docs/changelog/112058.yaml deleted file mode 100644 index e974b3413582e..0000000000000 --- a/docs/changelog/112058.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112058 -summary: Fix RRF validation for `rank_constant` < 1 -area: Ranking -type: bug -issues: [] diff --git a/docs/changelog/112063.yaml b/docs/changelog/112063.yaml deleted file mode 100644 index 190993967a074..0000000000000 --- a/docs/changelog/112063.yaml +++ /dev/null @@ -1,32 +0,0 @@ -pr: 112063 -summary: Spatial search functions support multi-valued fields in compute engine -area: ES|QL -type: bug -issues: - - 112102 - - 112505 - - 110830 -highlight: - title: "ESQL: Multi-value fields supported in Geospatial predicates" - body: |- - Supporting multi-value fields in `WHERE` predicates is a challenge due to not knowing whether `ALL` or `ANY` - of the values in the field should pass the predicate. - For example, should the field `age:[10,30]` pass the predicate `WHERE age>20` or not? - This ambiguity does not exist with the spatial predicates - `ST_INTERSECTS` and `ST_DISJOINT`, because the choice between `ANY` or `ALL` - is implied by the predicate itself. - Consider a predicate checking a field named `location` against a test geometry named `shape`: - - * `ST_INTERSECTS(field, shape)` - true if `ANY` value can intersect the shape - * `ST_DISJOINT(field, shape)` - true only if `ALL` values are disjoint from the shape - - This works even if the shape argument is itself a complex or compound geometry.
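To make the `ANY` semantics concrete, here is a minimal illustrative sketch; the index name `airports` and the polygon are hypothetical and not taken from the change itself:

[source,esql]
----
FROM airports
| WHERE ST_INTERSECTS(location, TO_GEOSHAPE("POLYGON((42 14, 43 14, 43 15, 42 15, 42 14))"))
----

A row whose multi-valued `location` field contains at least one value intersecting the polygon matches; with `ST_DISJOINT` the same row would match only if every value were disjoint from it.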
- - Similar logic exists for `ST_CONTAINS` and `ST_WITHIN` predicates, but these are not as easily solved - with `ANY` or `ALL`, because a collection of geometries contains another collection if each of the contained - geometries is within at least one of the containing geometries. Evaluating this requires that the multi-value - field is first combined into a single geometry before performing the predicate check. - - * `ST_CONTAINS(field, shape)` - true if the combined geometry contains the shape - * `ST_WITHIN(field, shape)` - true if the combined geometry is within the shape - notable: false diff --git a/docs/changelog/112066.yaml b/docs/changelog/112066.yaml deleted file mode 100644 index 5dd846766bc8e..0000000000000 --- a/docs/changelog/112066.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112066 -summary: Do not treat replica as unassigned if primary recently created and unassigned - time is below a threshold -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/112081.yaml b/docs/changelog/112081.yaml deleted file mode 100644 index a4009e01fca71..0000000000000 --- a/docs/changelog/112081.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112081 -summary: "[ES|QL] Validate index name in parser" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112100.yaml b/docs/changelog/112100.yaml deleted file mode 100644 index 9135edecb4d77..0000000000000 --- a/docs/changelog/112100.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112100 -summary: Exclude internal data streams from global retention -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/112123.yaml b/docs/changelog/112123.yaml deleted file mode 100644 index 0c0d7ac44cd17..0000000000000 --- a/docs/changelog/112123.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112123 -summary: SLM interval schedule followup - add back `getFieldName` style getters -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/112126.yaml b/docs/changelog/112126.yaml deleted file mode 100644 index f6a7aeb893a5e..0000000000000 --- a/docs/changelog/112126.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112126 -summary: Add support for spatial relationships in point field mapper -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/112133.yaml b/docs/changelog/112133.yaml deleted file mode 100644 index 11109402b7373..0000000000000 --- a/docs/changelog/112133.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112133 -summary: Add telemetry for repository usage -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/112151.yaml b/docs/changelog/112151.yaml deleted file mode 100644 index f5cbfd8da07c2..0000000000000 --- a/docs/changelog/112151.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112151 -summary: Store original source for keywords using a normalizer -area: Logs -type: enhancement -issues: [] diff --git a/docs/changelog/112199.yaml b/docs/changelog/112199.yaml deleted file mode 100644 index eb22f215f9828..0000000000000 --- a/docs/changelog/112199.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112199 -summary: Support docvalues only query in shape field -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/112200.yaml b/docs/changelog/112200.yaml deleted file mode 100644 index 0c2c3d71e3ddf..0000000000000 --- a/docs/changelog/112200.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112200 -summary: "ES|QL: better validation of GROK patterns" -area: ES|QL -type: bug -issues: - - 112111 diff --git a/docs/changelog/112210.yaml b/docs/changelog/112210.yaml deleted file mode 100644 index 
6483b8b01315c..0000000000000 --- a/docs/changelog/112210.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112210 -summary: Expose global retention settings via data stream lifecycle API -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/112214.yaml b/docs/changelog/112214.yaml deleted file mode 100644 index 430f95a72bb3f..0000000000000 --- a/docs/changelog/112214.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112214 -summary: '`ByteArrayStreamInput:` Return -1 when there are no more bytes to read' -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/112218.yaml b/docs/changelog/112218.yaml deleted file mode 100644 index c426dd7ade4ed..0000000000000 --- a/docs/changelog/112218.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 112218 -summary: "ESQL: Fix a bug in `MV_PERCENTILE`" -area: ES|QL -type: bug -issues: - - 112193 - - 112180 - - 112187 - - 112188 diff --git a/docs/changelog/112250.yaml b/docs/changelog/112250.yaml deleted file mode 100644 index edbb5667d4b9d..0000000000000 --- a/docs/changelog/112250.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112250 -summary: Do not exclude empty arrays or empty objects in source filtering -area: Search -type: bug -issues: [109668] diff --git a/docs/changelog/112262.yaml b/docs/changelog/112262.yaml deleted file mode 100644 index fe23c14c79c9e..0000000000000 --- a/docs/changelog/112262.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112262 -summary: Check for disabling own user in Put User API -area: Authentication -type: bug -issues: - - 90205 diff --git a/docs/changelog/112263.yaml b/docs/changelog/112263.yaml deleted file mode 100644 index 2d1321f327673..0000000000000 --- a/docs/changelog/112263.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112263 -summary: Fix `TokenService` always appearing used in Feature Usage -area: License -type: bug -issues: - - 61956 diff --git a/docs/changelog/112270.yaml b/docs/changelog/112270.yaml deleted file mode 100644 index 1e6b9c7fc9290..0000000000000 --- a/docs/changelog/112270.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112270 -summary: Support sparse embedding models in the elasticsearch inference service -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112273.yaml b/docs/changelog/112273.yaml deleted file mode 100644 index 3182a1884a145..0000000000000 --- a/docs/changelog/112273.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111181 -summary: "[Inference API] Add Docs for AlibabaCloud AI Search Support for the Inference API" -area: Machine Learning -type: enhancement -issues: [ ] diff --git a/docs/changelog/112277.yaml b/docs/changelog/112277.yaml deleted file mode 100644 index eac474555999a..0000000000000 --- a/docs/changelog/112277.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112277 -summary: Upgrade `repository-azure` dependencies -area: Snapshot/Restore -type: upgrade -issues: [] diff --git a/docs/changelog/112282.yaml b/docs/changelog/112282.yaml deleted file mode 100644 index beea119b06aef..0000000000000 --- a/docs/changelog/112282.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112282 -summary: Adds example plugin for custom ingest processor -area: Ingest Node -type: enhancement -issues: - - 111539 diff --git a/docs/changelog/112294.yaml b/docs/changelog/112294.yaml deleted file mode 100644 index 71ce9eeef584c..0000000000000 --- a/docs/changelog/112294.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 112294 -summary: "Use fallback synthetic source for `copy_to` and doc_values: false cases" -area: Mapping -type: enhancement -issues: - - 110753 - - 110038 - - 109546 diff --git 
a/docs/changelog/112295.yaml b/docs/changelog/112295.yaml deleted file mode 100644 index ecbd365d03918..0000000000000 --- a/docs/changelog/112295.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112295 -summary: "ESQL: Speed up CASE for some parameters" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112303.yaml b/docs/changelog/112303.yaml deleted file mode 100644 index a363e621e4c48..0000000000000 --- a/docs/changelog/112303.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112303 -summary: Add 'verbose' flag retrieving `maximum_timestamp` for get data stream API -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/112320.yaml b/docs/changelog/112320.yaml deleted file mode 100644 index d35a08dfa4e91..0000000000000 --- a/docs/changelog/112320.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112320 -summary: Upgrade xcontent to Jackson 2.17.2 -area: Infra/Core -type: upgrade -issues: [] diff --git a/docs/changelog/112330.yaml b/docs/changelog/112330.yaml deleted file mode 100644 index 498698f5175ba..0000000000000 --- a/docs/changelog/112330.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112330 -summary: Add links to network disconnect troubleshooting -area: Network -type: enhancement -issues: [] diff --git a/docs/changelog/112337.yaml b/docs/changelog/112337.yaml deleted file mode 100644 index f7d667e23cfe9..0000000000000 --- a/docs/changelog/112337.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112337 -summary: Add workaround for missing shard gen blob -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/112341.yaml b/docs/changelog/112341.yaml deleted file mode 100644 index 8f44b53ad9998..0000000000000 --- a/docs/changelog/112341.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112341 -summary: Fix DLS using runtime fields and synthetic source -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/112345.yaml b/docs/changelog/112345.yaml deleted file mode 100644 index b922fe3754cbb..0000000000000 --- a/docs/changelog/112345.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 112345 -summary: Allow dimension fields to have multiple values in standard and logsdb index - mode -area: Mapping -type: enhancement -issues: - - 112232 - - 112239 diff --git a/docs/changelog/112348.yaml b/docs/changelog/112348.yaml deleted file mode 100644 index 84110a7cd4f1b..0000000000000 --- a/docs/changelog/112348.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112348 -summary: Introduce repository integrity verification API -area: Snapshot/Restore -type: enhancement -issues: - - 52622 diff --git a/docs/changelog/112350.yaml b/docs/changelog/112350.yaml deleted file mode 100644 index 994cd3a65c633..0000000000000 --- a/docs/changelog/112350.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112350 -summary: "[ESQL] Add `SPACE` function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112369.yaml b/docs/changelog/112369.yaml deleted file mode 100644 index fb1c4775f7a12..0000000000000 --- a/docs/changelog/112369.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112369 -summary: Register Task while Streaming -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112397.yaml b/docs/changelog/112397.yaml deleted file mode 100644 index e67478ec69b1c..0000000000000 --- a/docs/changelog/112397.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112397 -summary: Control storing array source with index setting -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/112401.yaml b/docs/changelog/112401.yaml deleted file mode 100644 index 
65e9e76ac25f6..0000000000000 --- a/docs/changelog/112401.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112401 -summary: "ESQL: Fix CASE when conditions are multivalued" -area: ES|QL -type: bug -issues: - - 112359 diff --git a/docs/changelog/112405.yaml b/docs/changelog/112405.yaml deleted file mode 100644 index 4e9f095fb80a8..0000000000000 --- a/docs/changelog/112405.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112405 -summary: Improve date expression/remote handling in index names -area: Search -type: bug -issues: - - 112243 diff --git a/docs/changelog/112409.yaml b/docs/changelog/112409.yaml deleted file mode 100644 index bad94b9f5f2be..0000000000000 --- a/docs/changelog/112409.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112409 -summary: Include reason when no nodes are found -area: "Transform" -type: bug -issues: - - 112404 diff --git a/docs/changelog/112412.yaml b/docs/changelog/112412.yaml deleted file mode 100644 index fda53ebd1ade0..0000000000000 --- a/docs/changelog/112412.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112412 -summary: Expose `HexFormat` in Painless -area: Infra/Scripting -type: enhancement -issues: [] diff --git a/docs/changelog/112431.yaml b/docs/changelog/112431.yaml deleted file mode 100644 index b8c1197bdc7ef..0000000000000 --- a/docs/changelog/112431.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112431 -summary: "Async search: Add ID and \"is running\" http headers" -area: Search -type: feature -issues: - - 109576 diff --git a/docs/changelog/112440.yaml b/docs/changelog/112440.yaml deleted file mode 100644 index f208474fa2686..0000000000000 --- a/docs/changelog/112440.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112440 -summary: "logs-apm.error-*: define log.level field as keyword" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/112451.yaml b/docs/changelog/112451.yaml deleted file mode 100644 index aa852cf5e2a1a..0000000000000 --- a/docs/changelog/112451.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 112451 -summary: Update data stream lifecycle telemetry to track global retention -area: Data streams -type: breaking -issues: [] -breaking: - title: Update data stream lifecycle telemetry to track global retention - area: REST API - details: |- - In this release we introduced global retention settings that apply to any data stream that fulfils the following criteria: - - - it is managed by the data stream lifecycle, - - it is not an internal data stream. - - As a result, we defined different types of retention: - - - **data retention**: the retention configured on the data stream level by the data stream user or owner - - **default global retention:** the retention configured by an admin on a cluster level and applied to any - data stream that doesn't have data retention and fulfils the criteria. - - **max global retention:** the retention configured by an admin to guard against having long retention periods. - Any data stream that fulfils the criteria will adhere to the data retention unless it exceeds the max retention, - in which case the max global retention applies. - - **effective retention:** the retention that applies to a data stream that fulfils the criteria at a given moment - in time. It takes into consideration all the retention settings above and resolves them to the retention that will take effect.
- impact: Users that use the field `data_lifecycle.retention` should use the `data_lifecycle.data_retention` field instead. - notable: false diff --git a/docs/changelog/112481.yaml b/docs/changelog/112481.yaml deleted file mode 100644 index 3e539ce8e4b75..0000000000000 --- a/docs/changelog/112481.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112481 -summary: Validate streaming HTTP Response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112489.yaml b/docs/changelog/112489.yaml deleted file mode 100644 index ebc84927b0e76..0000000000000 --- a/docs/changelog/112489.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112489 -summary: "ES|QL: better validation for RLIKE patterns" -area: ES|QL -type: bug -issues: - - 112485 diff --git a/docs/changelog/112508.yaml b/docs/changelog/112508.yaml deleted file mode 100644 index 3945ebd226ac4..0000000000000 --- a/docs/changelog/112508.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112508 -summary: "[ML] Create Inference API will no longer return model_id and now only return inference_id" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/112512.yaml b/docs/changelog/112512.yaml deleted file mode 100644 index a9812784ccfca..0000000000000 --- a/docs/changelog/112512.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112512 -summary: Add Completion Inference API for Alibaba Cloud AI Search Model -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112519.yaml b/docs/changelog/112519.yaml deleted file mode 100644 index aa8a942ef0f58..0000000000000 --- a/docs/changelog/112519.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112519 -summary: Lower the memory footprint when creating `DelayedBucket` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112547.yaml b/docs/changelog/112547.yaml deleted file mode 100644 index 7f42f2a82976e..0000000000000 --- a/docs/changelog/112547.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112547 -summary: Remove reduce and `reduceContext` from `DelayedBucket` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112565.yaml b/docs/changelog/112565.yaml deleted file mode 100644 index be9ec41419a09..0000000000000 --- a/docs/changelog/112565.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112565 -summary: Server-Sent Events for Inference response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112571.yaml b/docs/changelog/112571.yaml deleted file mode 100644 index f1be2e5c291de..0000000000000 --- a/docs/changelog/112571.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 112571 -summary: Deprecate dot-prefixed indices and composable template index patterns -area: CRUD -type: deprecation -issues: [] -deprecation: - title: Deprecate dot-prefixed indices and composable template index patterns - area: CRUD - details: "Indices beginning with a dot '.' are reserved for system and internal\ - \ indices, and should not be used by an end-user. Additionally, composable index\ - \ templates that contain patterns for dot-prefixed indices should be avoided,\ - \ as these patterns are meant for internal use only. In a future Elasticsearch\ - \ version, creation of these dot-prefixed indices will no longer be allowed." - impact: "Requests performing an action that would create an index beginning with\ - \ a dot (indexing a document, manual creation, reindex), or creating an index\ - \ template with index patterns beginning with a dot, will contain a deprecation\ - \ header warning about dot-prefixed indices in the response."
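As a side note on the `RLIKE` validation fix above (PR 112489): the validation concerns regular-expression predicates of the following shape. This is a hedged sketch only; the index and field names are hypothetical:

[source,esql]
----
FROM logs
| WHERE message RLIKE "error.*timeout"
----

Per that entry's summary, malformed patterns in such queries now receive better up-front validation.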
diff --git a/docs/changelog/112574.yaml b/docs/changelog/112574.yaml deleted file mode 100644 index 3111697a8b97f..0000000000000 --- a/docs/changelog/112574.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112574 -summary: Add privileges required for CDR misconfiguration features to work on AWS SecurityHub integration -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/112595.yaml b/docs/changelog/112595.yaml deleted file mode 100644 index 19ee0368475ae..0000000000000 --- a/docs/changelog/112595.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112595 -summary: Collect and display execution metadata for ES|QL cross cluster searches -area: ES|QL -type: enhancement -issues: - - 112402 diff --git a/docs/changelog/112612.yaml b/docs/changelog/112612.yaml deleted file mode 100644 index d6037e34ff171..0000000000000 --- a/docs/changelog/112612.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112612 -summary: Set `replica_unassigned_buffer_time` in constructor -area: Health -type: bug -issues: [] diff --git a/docs/changelog/112645.yaml b/docs/changelog/112645.yaml deleted file mode 100644 index cf4ef4609a1f3..0000000000000 --- a/docs/changelog/112645.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112645 -summary: Add support for multi-value dimensions -area: Mapping -type: enhancement -issues: - - 110387 diff --git a/docs/changelog/112652.yaml b/docs/changelog/112652.yaml deleted file mode 100644 index c7ddcd4bffdc8..0000000000000 --- a/docs/changelog/112652.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110399 -summary: "[Inference API] AlibabaCloud AI Search service supports chunked inference for semantic_text fields" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112665.yaml b/docs/changelog/112665.yaml deleted file mode 100644 index ae2cf7f171f4b..0000000000000 --- a/docs/changelog/112665.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 112665 -summary: Remove zstd feature flag for index codec best compression -area: Codec -type: enhancement -issues: [] -highlight: - title: Enable ZStandard compression for indices with index.codec set to best_compression - body: |- - Previously, DEFLATE compression was used to compress stored fields in indices with the index.codec index setting set to - best_compression. With this change, ZStandard is used as the compression algorithm for stored fields in indices with the - index.codec index setting set to best_compression. Using ZStandard results in less storage usage with - similar indexing throughput, depending on what options are used. Experiments with indexing logs have shown that - ZStandard offers ~12% lower storage usage and a ~14% higher indexing throughput compared to DEFLATE.
- notable: true diff --git a/docs/changelog/112677.yaml b/docs/changelog/112677.yaml deleted file mode 100644 index 89662236c6ca5..0000000000000 --- a/docs/changelog/112677.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112677 -summary: Stream OpenAI Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112678.yaml b/docs/changelog/112678.yaml deleted file mode 100644 index 7a1a9d622a65f..0000000000000 --- a/docs/changelog/112678.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112678 -summary: Make "too many clauses" throw IllegalArgumentException to avoid 500s -area: Search -type: bug -issues: - - 112177 \ No newline at end of file diff --git a/docs/changelog/112687.yaml b/docs/changelog/112687.yaml deleted file mode 100644 index dd079e1b700c4..0000000000000 --- a/docs/changelog/112687.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112687 -summary: Add `TaskManager` to `pluginServices` -area: Infra/Metrics -type: enhancement -issues: [] diff --git a/docs/changelog/112706.yaml b/docs/changelog/112706.yaml deleted file mode 100644 index fc0f5c4c554a1..0000000000000 --- a/docs/changelog/112706.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112706 -summary: Configure keeping source in `FieldMapper` -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/112707.yaml b/docs/changelog/112707.yaml deleted file mode 100644 index 9f16cfcd2b6f2..0000000000000 --- a/docs/changelog/112707.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112707 -summary: Deduplicate `BucketOrder` when deserializing -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112723.yaml b/docs/changelog/112723.yaml deleted file mode 100644 index dbee3232d1c75..0000000000000 --- a/docs/changelog/112723.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112723 -summary: Improve DateTime error handling and add some bad date tests -area: Search -type: bug -issues: - - 112190 diff --git a/docs/changelog/112768.yaml b/docs/changelog/112768.yaml deleted file mode 100644 index 13d5b8eaae38f..0000000000000 --- a/docs/changelog/112768.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112768 -summary: Deduplicate Kuromoji User Dictionary -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/112826.yaml b/docs/changelog/112826.yaml deleted file mode 100644 index 65c05b4d6035a..0000000000000 --- a/docs/changelog/112826.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112826 -summary: "Multi term intervals: increase max_expansions" -area: Search -type: enhancement -issues: - - 110491 diff --git a/docs/changelog/112850.yaml b/docs/changelog/112850.yaml deleted file mode 100644 index 97a8877f6291c..0000000000000 --- a/docs/changelog/112850.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112850 -summary: Fix synthetic source field names for multi-fields -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/112874.yaml b/docs/changelog/112874.yaml deleted file mode 100644 index 99ed9ed28fa0f..0000000000000 --- a/docs/changelog/112874.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112874 -summary: Reduce heap usage for `AggregatorsReducer` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112888.yaml b/docs/changelog/112888.yaml deleted file mode 100644 index 48806a491e531..0000000000000 --- a/docs/changelog/112888.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112888 -summary: Fix `getDatabaseType` for unusual MMDBs -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/112895.yaml b/docs/changelog/112895.yaml deleted file mode 100644 index 59d391f649280..0000000000000 --- 
a/docs/changelog/112895.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112895 -summary: (logger) change from error to warn for short circuiting user -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/112905.yaml b/docs/changelog/112905.yaml deleted file mode 100644 index aac0b7e9dfb59..0000000000000 --- a/docs/changelog/112905.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112905 -summary: "[ES|QL] Named parameter for field names and field name patterns" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112916.yaml b/docs/changelog/112916.yaml deleted file mode 100644 index 91dc7f332efc4..0000000000000 --- a/docs/changelog/112916.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112916 -summary: Allow out of range term queries for numeric types -area: Search -type: bug -issues: [] diff --git a/docs/changelog/112929.yaml b/docs/changelog/112929.yaml deleted file mode 100644 index e5f49897432de..0000000000000 --- a/docs/changelog/112929.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112929 -summary: "ES|QL: Add support for cached strings in plan serialization" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112933.yaml b/docs/changelog/112933.yaml deleted file mode 100644 index 222cd5aadf739..0000000000000 --- a/docs/changelog/112933.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112933 -summary: "Allow incubating Panama Vector in simdvec, and add vectorized `ipByteBin`" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/112938.yaml b/docs/changelog/112938.yaml deleted file mode 100644 index 82b98871c3352..0000000000000 --- a/docs/changelog/112938.yaml +++ /dev/null @@ -1,35 +0,0 @@ -pr: 112938 -summary: Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function -area: ES|QL -type: enhancement -issues: - - 109973 -highlight: - title: Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function - body: |- - The most used and likely most valuable geospatial search query in Elasticsearch is the sorted proximity search, - finding items within a certain distance of a point of interest and sorting the results by distance. - This has been possible in ES|QL since 8.15.0, but the sorting was done in-memory, not pushed down to Lucene. - Now the sorting is pushed down to Lucene, which results in a significant performance improvement. - - Queries that perform both filtering and sorting on distance are supported. 
For example: - - [source,esql] - ---- - FROM test - | EVAL distance = ST_DISTANCE(location, TO_GEOPOINT("POINT(-122.4194 37.7749)")) - | WHERE distance < 1000000 - | SORT distance ASC, name DESC - | LIMIT 10 - ---- - - In addition, the support for sorting on EVAL expressions has been extended to cover references to fields: - - [source,esql] - ---- - FROM test - | EVAL ref = field - | SORT ref ASC - | LIMIT 10 - ---- - notable: false diff --git a/docs/changelog/112972.yaml b/docs/changelog/112972.yaml deleted file mode 100644 index 5332ac13fd13f..0000000000000 --- a/docs/changelog/112972.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112972 -summary: "ILM: Add `total_shards_per_node` setting to searchable snapshot" -area: ILM+SLM -type: enhancement -issues: - - 112261 diff --git a/docs/changelog/112973.yaml b/docs/changelog/112973.yaml deleted file mode 100644 index 3ba86a31334ff..0000000000000 --- a/docs/changelog/112973.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112973 -summary: Fix verbose get data stream API not requiring extra privileges -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/113013.yaml b/docs/changelog/113013.yaml deleted file mode 100644 index 1cec31074e806..0000000000000 --- a/docs/changelog/113013.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113013 -summary: Account for `DelayedBucket` before reduction -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/113027.yaml b/docs/changelog/113027.yaml deleted file mode 100644 index 825740cf5691d..0000000000000 --- a/docs/changelog/113027.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113027 -summary: Retrieve the source for objects and arrays in a separate parsing phase -area: Mapping -type: bug -issues: - - 112374 diff --git a/docs/changelog/113051.yaml b/docs/changelog/113051.yaml deleted file mode 100644 index 9be68f9f2b03e..0000000000000 --- a/docs/changelog/113051.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113051 -summary: Add Search Inference ID To Semantic Text Mapping -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/113103.yaml b/docs/changelog/113103.yaml deleted file mode 100644 index 2ed98e0907bae..0000000000000 --- a/docs/changelog/113103.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113103 -summary: "ESQL: Align year diffing to the rest of the units in DATE_DIFF: chronological" -area: ES|QL -type: bug -issues: - - 112482 diff --git a/docs/changelog/113143.yaml b/docs/changelog/113143.yaml deleted file mode 100644 index 4a2044cca0ce4..0000000000000 --- a/docs/changelog/113143.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 113143 -summary: Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 -area: Analysis -type: deprecation -issues: [] -deprecation: - title: Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 - area: Analysis - details: kp, dutch_kp, dutchKp and lovins stemmers are deprecated and will be removed. - impact: These stemmers will be removed and will no longer be supported. diff --git a/docs/changelog/113158.yaml b/docs/changelog/113158.yaml deleted file mode 100644 index d097ea11b3a23..0000000000000 --- a/docs/changelog/113158.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113158 -summary: Adds a new Inference API for streaming responses back to the user.
-area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113172.yaml b/docs/changelog/113172.yaml deleted file mode 100644 index 2d03196b0cfbd..0000000000000 --- a/docs/changelog/113172.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113172 -summary: "[ESQL] Add finish() elapsed time to aggregation profiling times" -area: ES|QL -type: enhancement -issues: - - 112950 diff --git a/docs/changelog/113183.yaml b/docs/changelog/113183.yaml deleted file mode 100644 index f30ce9831adb3..0000000000000 --- a/docs/changelog/113183.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113183 -summary: "ESQL: TOP support for strings" -area: ES|QL -type: feature -issues: - - 109849 diff --git a/docs/changelog/113187.yaml b/docs/changelog/113187.yaml deleted file mode 100644 index 397179c4bc3bb..0000000000000 --- a/docs/changelog/113187.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113187 -summary: Preserve Step Info Across ILM Auto Retries -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/113194.yaml b/docs/changelog/113194.yaml new file mode 100644 index 0000000000000..132659321c65e --- /dev/null +++ b/docs/changelog/113194.yaml @@ -0,0 +1,5 @@ +pr: 113194 +summary: Add Search Phase APM metrics +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/113251.yaml b/docs/changelog/113251.yaml deleted file mode 100644 index 49167e6e4c915..0000000000000 --- a/docs/changelog/113251.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113251 -summary: Span term query to convert to match no docs when unmapped field is targeted -area: Search -type: bug -issues: [] diff --git a/docs/changelog/113276.yaml b/docs/changelog/113276.yaml deleted file mode 100644 index 87241878b3ec4..0000000000000 --- a/docs/changelog/113276.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113276 -summary: Adding component template substitutions to the simulate ingest API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/113280.yaml b/docs/changelog/113280.yaml deleted file mode 100644 index 1d8de0d87dd0d..0000000000000 --- a/docs/changelog/113280.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113280 -summary: Warn for model load failures if they have a status code <500 -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/113286.yaml b/docs/changelog/113286.yaml deleted file mode 100644 index eeffb10b4e638..0000000000000 --- a/docs/changelog/113286.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 113286 -summary: Deprecate legacy params from range query -area: Search -type: deprecation -issues: [] -deprecation: - title: Deprecate legacy params from range query - area: REST API - details: Range query will no longer accept `to`, `from`, `include_lower`, and `include_upper` parameters. - impact: Use the `gt`, `gte`, `lt` and `lte` parameters instead.
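A hedged, illustrative sketch of the `TOP` string support noted above (PR 113183); the index and field names here are hypothetical:

[source,esql]
----
FROM employees
| STATS top_names = TOP(first_name, 3, "asc")
----

Per that entry's summary, `TOP(field, limit, order)` now also accepts string fields, collecting the top N values in the given order.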
diff --git a/docs/changelog/113297.yaml b/docs/changelog/113297.yaml deleted file mode 100644 index 476619f432639..0000000000000 --- a/docs/changelog/113297.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113297 -summary: "[ES|QL] add reverse function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/113314.yaml b/docs/changelog/113314.yaml deleted file mode 100644 index c496ad3dd86f1..0000000000000 --- a/docs/changelog/113314.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113314 -summary: "[ES|QL] Check expression resolved before checking its data type in `ImplicitCasting`" -area: ES|QL -type: bug -issues: - - 113242 diff --git a/docs/changelog/113333.yaml b/docs/changelog/113333.yaml deleted file mode 100644 index c6a3584845729..0000000000000 --- a/docs/changelog/113333.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113333 -summary: Upgrade to Lucene 9.12 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/113373.yaml b/docs/changelog/113373.yaml deleted file mode 100644 index cbb3829e03425..0000000000000 --- a/docs/changelog/113373.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113373 -summary: Implement `parseBytesRef` for `TimeSeriesRoutingHashFieldType` -area: TSDB -type: bug -issues: - - 112399 diff --git a/docs/changelog/113374.yaml b/docs/changelog/113374.yaml deleted file mode 100644 index f1d5750de0f60..0000000000000 --- a/docs/changelog/113374.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113374 -summary: Add ESQL match function -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/113385.yaml b/docs/changelog/113385.yaml deleted file mode 100644 index 9cee1ebcd4f64..0000000000000 --- a/docs/changelog/113385.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113385 -summary: Small performance improvement in h3 library -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/113387.yaml b/docs/changelog/113387.yaml deleted file mode 100644 index 4819404a55809..0000000000000 --- a/docs/changelog/113387.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113387 -summary: "Add `CircuitBreaker` to TDigest, Step 3: Connect with ESQL CB" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/113498.yaml b/docs/changelog/113498.yaml deleted file mode 100644 index 93b21a1d171eb..0000000000000 --- a/docs/changelog/113498.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113498 -summary: Listing all available databases in the _ingest/geoip/database API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/113499.yaml b/docs/changelog/113499.yaml deleted file mode 100644 index a4d7f28eb0de4..0000000000000 --- a/docs/changelog/113499.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113499 -summary: Fix synthetic source for flattened field when used with `ignore_above` -area: Logs -type: bug -issues: - - 112044 diff --git a/docs/changelog/113552.yaml b/docs/changelog/113552.yaml deleted file mode 100644 index 48f7da309e82e..0000000000000 --- a/docs/changelog/113552.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113552 -summary: Tag redacted document in ingest metadata -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/113570.yaml b/docs/changelog/113570.yaml deleted file mode 100644 index 8cfad9195c5cd..0000000000000 --- a/docs/changelog/113570.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 113570 -summary: Fix `ignore_above` handling in synthetic source when index level setting - is used -area: Logs -type: bug -issues: - - 113538 diff --git a/docs/changelog/113588.yaml b/docs/changelog/113588.yaml deleted file mode 100644 index 
e797100443f54..0000000000000 --- a/docs/changelog/113588.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113588 -summary: Add asset criticality indices for `kibana_system_user` -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/113607.yaml b/docs/changelog/113607.yaml deleted file mode 100644 index eb25d2600a555..0000000000000 --- a/docs/changelog/113607.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113607 -summary: Add more `dense_vector` details for cluster stats field stats -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/113613.yaml b/docs/changelog/113613.yaml deleted file mode 100644 index 4b020333aaa36..0000000000000 --- a/docs/changelog/113613.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 113613 -summary: "Add `CircuitBreaker` to TDigest, Step 4: Take into account shallow classes\ - \ size" -area: ES|QL -type: enhancement -issues: - - 113916 diff --git a/docs/changelog/113623.yaml b/docs/changelog/113623.yaml deleted file mode 100644 index 8587687d27080..0000000000000 --- a/docs/changelog/113623.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113623 -summary: "Adding chunking settings to `MistralService,` `GoogleAiStudioService,` and\ - \ `HuggingFaceService`" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113690.yaml b/docs/changelog/113690.yaml deleted file mode 100644 index bd5f1245f471e..0000000000000 --- a/docs/changelog/113690.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113690 -summary: Add object param for keeping synthetic source -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/113723.yaml b/docs/changelog/113723.yaml deleted file mode 100644 index 2cbcf49102719..0000000000000 --- a/docs/changelog/113723.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113723 -summary: Fix max file size check to use `getMaxFileSize` -area: Infra/Core -type: bug -issues: - - 113705 diff --git a/docs/changelog/113735.yaml b/docs/changelog/113735.yaml deleted file mode 100644 index 4f6579c7cb9e0..0000000000000 --- a/docs/changelog/113735.yaml +++ /dev/null @@ -1,28 +0,0 @@ -pr: 113735 -summary: "ESQL: Introduce per agg filter" -area: ES|QL -type: feature -issues: [] -highlight: - title: "ESQL: Introduce per agg filter" - body: |- - Add support for aggregation scoped filters that work dynamically on the - data in each group. - - [source,esql] - ---- - | STATS success = COUNT(*) WHERE 200 <= code AND code < 300, - redirect = COUNT(*) WHERE 300 <= code AND code < 400, - client_err = COUNT(*) WHERE 400 <= code AND code < 500, - server_err = COUNT(*) WHERE 500 <= code AND code < 600, - total_count = COUNT(*) - ---- - - Implementation-wise, the base AggregateFunction has been extended to - allow a filter to be passed on. This is required to incorporate the - filter as part of the aggregate equality/identity, which would fail with - the filter as an external component. - As part of the process, the serialization for the existing aggregations - had to be fixed so that AggregateFunction implementations - delegate to their parent first.
- notable: true diff --git a/docs/changelog/113812.yaml b/docs/changelog/113812.yaml deleted file mode 100644 index 04498b4ae5f7e..0000000000000 --- a/docs/changelog/113812.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113812 -summary: Add Streaming Inference spec -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113816.yaml b/docs/changelog/113816.yaml deleted file mode 100644 index 8c7cf14e356b3..0000000000000 --- a/docs/changelog/113816.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113816 -summary: Avoid using concurrent collector manager in `LuceneChangesSnapshot` -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/113825.yaml b/docs/changelog/113825.yaml deleted file mode 100644 index 6d4090fda7ed2..0000000000000 --- a/docs/changelog/113825.yaml +++ /dev/null @@ -1,12 +0,0 @@ -pr: 113825 -summary: Cross-cluster search telemetry -area: Search -type: feature -issues: [] -highlight: - title: Cross-cluster search telemetry - body: |- - Cross-cluster search telemetry is collected when cross-cluster searches - are performed, and is returned as the "ccs" field in the `_cluster/stats` output. - This change also adds a new parameter, `include_remotes=true`, to the `_cluster/stats` API, - which will collect data from connected remote clusters. diff --git a/docs/changelog/113873.yaml b/docs/changelog/113873.yaml deleted file mode 100644 index ac52aaf94d518..0000000000000 --- a/docs/changelog/113873.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113873 -summary: Default inference endpoint for ELSER -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113897.yaml b/docs/changelog/113897.yaml deleted file mode 100644 index db0c53518613c..0000000000000 --- a/docs/changelog/113897.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113897 -summary: "Add chunking settings configuration to `CohereService,` `AmazonBedrockService,`\ - \ and `AzureOpenAiService`" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113910.yaml b/docs/changelog/113910.yaml deleted file mode 100644 index aa9d3b61fe768..0000000000000 --- a/docs/changelog/113910.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113910 -summary: Do not expand dots when storing objects in ignored source -area: Logs -type: bug -issues: [] diff --git a/docs/changelog/113911.yaml b/docs/changelog/113911.yaml deleted file mode 100644 index 5c2f93a6ea76a..0000000000000 --- a/docs/changelog/113911.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113911 -summary: Enable OpenAI Streaming -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113967.yaml b/docs/changelog/113967.yaml deleted file mode 100644 index 58b72eba49deb..0000000000000 --- a/docs/changelog/113967.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 113967 -summary: "ESQL: Entirely remove META FUNCTIONS" -area: ES|QL -type: breaking -issues: [] -breaking: - title: "ESQL: Entirely remove META FUNCTIONS" - area: ES|QL - details: | - Removes an undocumented syntax from ESQL: META FUNCTION. This was never - reliable or really useful. Consult the documentation instead.
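For reference, a sketch of the removed source command, shown only to identify the syntax being removed (it was a complete query on its own and no longer parses after this change):

[source,esql]
----
// Removed; this now produces a parsing error:
META FUNCTIONS
----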
- impact: "Removes an undocumented syntax from ESQL: META FUNCTION" - notable: false diff --git a/docs/changelog/113975.yaml b/docs/changelog/113975.yaml deleted file mode 100644 index 632ba038271bb..0000000000000 --- a/docs/changelog/113975.yaml +++ /dev/null @@ -1,19 +0,0 @@ -pr: 113975 -summary: JDK locale database change -area: Mapping -type: breaking -issues: [] -breaking: - title: JDK locale database change - area: Mapping - details: | - {es} 8.16 changes the version of the JDK that is included from version 22 to version 23. This changes the locale database that is used by Elasticsearch from the COMPAT database to the CLDR database. This change can cause significant differences to the textual date formats accepted by Elasticsearch, and to calculated week-dates. - - If you run {es} 8.16 on JDK version 22 or below, it will use the COMPAT locale database to match the behavior of 8.15. However, starting with {es} 9.0, {es} will use the CLDR database regardless of JDK version it is run on. - impact: | - This affects you if you use custom date formats using textual or week-date field specifiers. If you use date fields or calculated week-dates that change between the COMPAT and CLDR databases, then this change will cause Elasticsearch to reject previously valid date fields as invalid data. You might need to modify your ingest or output integration code to account for the differences between these two JDK versions. - - Starting in version 8.15.2, Elasticsearch will log deprecation warnings if you are using date format specifiers that might change on upgrading to JDK 23. These warnings are visible in Kibana. - - For detailed guidance, refer to <> and the https://ela.st/jdk-23-locales[Elastic blog]. - notable: true diff --git a/docs/changelog/113981.yaml b/docs/changelog/113981.yaml deleted file mode 100644 index 38f3a6f04ae46..0000000000000 --- a/docs/changelog/113981.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113981 -summary: "Adding chunking settings to `GoogleVertexAiService,` `AzureAiStudioService,`\ - \ and `AlibabaCloudSearchService`" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113988.yaml b/docs/changelog/113988.yaml deleted file mode 100644 index d55e7eb2db326..0000000000000 --- a/docs/changelog/113988.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113988 -summary: Track search and fetch failure stats -area: Stats -type: enhancement -issues: [] diff --git a/docs/changelog/113989.yaml b/docs/changelog/113989.yaml deleted file mode 100644 index 7bf50b52d9e07..0000000000000 --- a/docs/changelog/113989.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113989 -summary: Add `max_multipart_parts` setting to S3 repository -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/114021.yaml b/docs/changelog/114021.yaml deleted file mode 100644 index e9dab5dce5685..0000000000000 --- a/docs/changelog/114021.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114021 -summary: "ESQL: Speed up grouping by bytes" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114080.yaml b/docs/changelog/114080.yaml deleted file mode 100644 index 395768c46369a..0000000000000 --- a/docs/changelog/114080.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114080 -summary: Stream Cohere Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114109.yaml b/docs/changelog/114109.yaml deleted file mode 100644 index ce51ed50f724c..0000000000000 --- a/docs/changelog/114109.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114109 -summary: Update 
cluster stats for retrievers -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/114128.yaml b/docs/changelog/114128.yaml deleted file mode 100644 index 721649d0d6fe0..0000000000000 --- a/docs/changelog/114128.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114128 -summary: Adding `index_template_substitutions` to the simulate ingest API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114157.yaml b/docs/changelog/114157.yaml deleted file mode 100644 index 22e0fda173e98..0000000000000 --- a/docs/changelog/114157.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114157 -summary: Add a `terminate` ingest processor -area: Ingest Node -type: feature -issues: - - 110218 diff --git a/docs/changelog/114168.yaml b/docs/changelog/114168.yaml deleted file mode 100644 index 58f1ab7110e7d..0000000000000 --- a/docs/changelog/114168.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114168 -summary: Add a query rules tester API call -area: Relevance -type: enhancement -issues: [] diff --git a/docs/changelog/114193.yaml b/docs/changelog/114193.yaml new file mode 100644 index 0000000000000..f18f9359007b8 --- /dev/null +++ b/docs/changelog/114193.yaml @@ -0,0 +1,5 @@ +pr: 114193 +summary: Add postal_code support to the City and Enterprise databases +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114202.yaml b/docs/changelog/114202.yaml new file mode 100644 index 0000000000000..50313b8938aa9 --- /dev/null +++ b/docs/changelog/114202.yaml @@ -0,0 +1,14 @@ +pr: 114202 +summary: Remove deprecated `xpack.searchable.snapshot.allocate_on_rolling_restart` setting +area: Snapshot/Restore +type: breaking +issues: [] +breaking: + title: Remove deprecated `xpack.searchable.snapshot.allocate_on_rolling_restart` setting + area: 'Cluster and node setting' + details: >- + The `xpack.searchable.snapshot.allocate_on_rolling_restart` setting was created as an escape-hatch just in case + relying on the `cluster.routing.allocation.enable=primaries` setting for allocating searchable snapshots during + rolling restarts had some unintended side-effects. It has been deprecated since 8.2.0. + impact: Remove `xpack.searchable.snapshot.allocate_on_rolling_restart` from your settings if present. 
+ notable: false diff --git a/docs/changelog/114234.yaml b/docs/changelog/114234.yaml deleted file mode 100644 index 0f77ada794bee..0000000000000 --- a/docs/changelog/114234.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114234 -summary: Prevent flattening of ordered and unordered interval sources -area: Search -type: bug -issues: [] diff --git a/docs/changelog/114268.yaml b/docs/changelog/114268.yaml new file mode 100644 index 0000000000000..5e4457005d7d3 --- /dev/null +++ b/docs/changelog/114268.yaml @@ -0,0 +1,5 @@ +pr: 114268 +summary: Support more maxmind fields in the geoip processor +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114271.yaml b/docs/changelog/114271.yaml deleted file mode 100644 index 7b47b922ff811..0000000000000 --- a/docs/changelog/114271.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114271 -summary: "[ES|QL] Skip validating remote cluster index names in parser" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/114295.yaml b/docs/changelog/114295.yaml deleted file mode 100644 index 2acdc293a206c..0000000000000 --- a/docs/changelog/114295.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114295 -summary: "Reprocess operator file settings when settings service starts, due to node restart or master node change" -area: Infra/Settings -type: enhancement -issues: [ ] diff --git a/docs/changelog/114309.yaml b/docs/changelog/114309.yaml deleted file mode 100644 index bcd1262062943..0000000000000 --- a/docs/changelog/114309.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114309 -summary: Upgrade to AWS SDK v2 -area: Machine Learning -type: enhancement -issues: - - 110590 diff --git a/docs/changelog/114321.yaml b/docs/changelog/114321.yaml deleted file mode 100644 index 286a72cfee840..0000000000000 --- a/docs/changelog/114321.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114321 -summary: Stream Anthropic Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114358.yaml b/docs/changelog/114358.yaml deleted file mode 100644 index 972bc5bfdbe1c..0000000000000 --- a/docs/changelog/114358.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114358 -summary: "ESQL: Use less memory in listener" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114363.yaml b/docs/changelog/114363.yaml deleted file mode 100644 index 51ca9ed34a7ca..0000000000000 --- a/docs/changelog/114363.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114363 -summary: Give the kibana system user permission to read security entities -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/114368.yaml b/docs/changelog/114368.yaml deleted file mode 100644 index 6c6e215a1bd49..0000000000000 --- a/docs/changelog/114368.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114368 -summary: "ESQL: Delay construction of warnings" -area: EQL -type: enhancement -issues: [] diff --git a/docs/changelog/114375.yaml b/docs/changelog/114375.yaml deleted file mode 100644 index 7ff7cc60b34ba..0000000000000 --- a/docs/changelog/114375.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114375 -summary: Handle `InternalSendException` inline for non-forking handlers -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/114382.yaml b/docs/changelog/114382.yaml deleted file mode 100644 index 9f572e14f4737..0000000000000 --- a/docs/changelog/114382.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114382 -summary: "[ES|QL] Add hypot function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114386.yaml b/docs/changelog/114386.yaml deleted file mode 100644 index 
cf9edda9de21e..0000000000000 --- a/docs/changelog/114386.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114386 -summary: Improve handling of failure to create persistent task -area: Task Management -type: bug -issues: [] diff --git a/docs/changelog/114389.yaml b/docs/changelog/114389.yaml deleted file mode 100644 index f56b165bc917e..0000000000000 --- a/docs/changelog/114389.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114389 -summary: Filter empty task settings objects from the API response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114407.yaml b/docs/changelog/114407.yaml deleted file mode 100644 index 4c1134a9d3834..0000000000000 --- a/docs/changelog/114407.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114407 -summary: Fix synthetic source handling for `bit` type in `dense_vector` field -area: Search -type: bug -issues: - - 114402 diff --git a/docs/changelog/114411.yaml b/docs/changelog/114411.yaml deleted file mode 100644 index 23bff3c8e25ba..0000000000000 --- a/docs/changelog/114411.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114411 -summary: "ESQL: Push down filters even in case of renames in Evals" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114429.yaml b/docs/changelog/114429.yaml deleted file mode 100644 index 56b0ffe7b43fb..0000000000000 --- a/docs/changelog/114429.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114429 -summary: Add chunking settings configuration to `ElasticsearchService/ELSER` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114439.yaml b/docs/changelog/114439.yaml deleted file mode 100644 index fd097d02f885f..0000000000000 --- a/docs/changelog/114439.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114439 -summary: Adding new bbq index types behind a feature flag -area: Vector Search -type: feature -issues: [] diff --git a/docs/changelog/114453.yaml b/docs/changelog/114453.yaml deleted file mode 100644 index 0d5345ad9d2a6..0000000000000 --- a/docs/changelog/114453.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114453 -summary: Switch default chunking strategy to sentence -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114457.yaml b/docs/changelog/114457.yaml deleted file mode 100644 index 9558c41852f69..0000000000000 --- a/docs/changelog/114457.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114457 -summary: "[Inference API] Introduce Update API to change some aspects of existing\ - \ inference endpoints" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114464.yaml b/docs/changelog/114464.yaml deleted file mode 100644 index 5f5ee816aa28d..0000000000000 --- a/docs/changelog/114464.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114464 -summary: Stream Azure Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114512.yaml b/docs/changelog/114512.yaml deleted file mode 100644 index 10dea3a2cbac1..0000000000000 --- a/docs/changelog/114512.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114512 -summary: Ensure clean thread context in `MasterService` -area: Cluster Coordination -type: bug -issues: [] diff --git a/docs/changelog/114521.yaml b/docs/changelog/114521.yaml new file mode 100644 index 0000000000000..c3a9c7cdd0848 --- /dev/null +++ b/docs/changelog/114521.yaml @@ -0,0 +1,5 @@ +pr: 114521 +summary: Add support for registered country fields for maxmind geoip databases +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114527.yaml b/docs/changelog/114527.yaml deleted file mode 100644 index 
74d95edcd1a1d..0000000000000 --- a/docs/changelog/114527.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114527 -summary: Verify Maxmind database types in the geoip processor -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114533.yaml b/docs/changelog/114533.yaml deleted file mode 100644 index f45589e8de921..0000000000000 --- a/docs/changelog/114533.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114533 -summary: Fix dim validation for bit `element_type` -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/114548.yaml b/docs/changelog/114548.yaml new file mode 100644 index 0000000000000..b9692bcb2d10c --- /dev/null +++ b/docs/changelog/114548.yaml @@ -0,0 +1,5 @@ +pr: 114548 +summary: Support IPinfo database configurations +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114549.yaml b/docs/changelog/114549.yaml deleted file mode 100644 index a6bdbba93876b..0000000000000 --- a/docs/changelog/114549.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114549 -summary: Send mid-stream errors to users -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/114552.yaml b/docs/changelog/114552.yaml deleted file mode 100644 index 00e2f95b5038d..0000000000000 --- a/docs/changelog/114552.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114552 -summary: Improve exception message for bad environment variable placeholders in settings -area: Infra/Settings -type: enhancement -issues: [110858] diff --git a/docs/changelog/114596.yaml b/docs/changelog/114596.yaml deleted file mode 100644 index a36978dcacd8c..0000000000000 --- a/docs/changelog/114596.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114596 -summary: Stream Google Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114601.yaml b/docs/changelog/114601.yaml deleted file mode 100644 index d2f563d62a639..0000000000000 --- a/docs/changelog/114601.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114601 -summary: Support semantic_text in object fields -area: Vector Search -type: bug -issues: - - 114401 diff --git a/docs/changelog/114638.yaml b/docs/changelog/114638.yaml deleted file mode 100644 index 0386aacfe3e18..0000000000000 --- a/docs/changelog/114638.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 114638 -summary: "ES|QL: Restrict sorting for `_source` and counter field types" -area: ES|QL -type: bug -issues: - - 114423 - - 111976 diff --git a/docs/changelog/114683.yaml b/docs/changelog/114683.yaml deleted file mode 100644 index a677e65a12b0e..0000000000000 --- a/docs/changelog/114683.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114683 -summary: Default inference endpoint for the multilingual-e5-small model -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114715.yaml b/docs/changelog/114715.yaml deleted file mode 100644 index 0894cb2fa42ca..0000000000000 --- a/docs/changelog/114715.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114715 -summary: Ignore unrecognized openai sse fields -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/114719.yaml b/docs/changelog/114719.yaml deleted file mode 100644 index 477d656d5b979..0000000000000 --- a/docs/changelog/114719.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114719 -summary: Wait for allocation on scale up -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114732.yaml b/docs/changelog/114732.yaml deleted file mode 100644 index 42176cdbda443..0000000000000 --- a/docs/changelog/114732.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114732 -summary: Stream Bedrock 
Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114750.yaml b/docs/changelog/114750.yaml deleted file mode 100644 index f7a3c8c283934..0000000000000 --- a/docs/changelog/114750.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114750 -summary: Create an ml node inference endpoint referencing an existing model -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114774.yaml b/docs/changelog/114774.yaml deleted file mode 100644 index 1becfe427fda0..0000000000000 --- a/docs/changelog/114774.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114774 -summary: "ESQL: Add support for multivalue fields in Arrow output" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114784.yaml b/docs/changelog/114784.yaml deleted file mode 100644 index 24ebe8b5fc09a..0000000000000 --- a/docs/changelog/114784.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114784 -summary: "[ES|QL] make named parameter for identifier and pattern snapshot" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/114836.yaml b/docs/changelog/114836.yaml deleted file mode 100644 index 6f21d3bfb9327..0000000000000 --- a/docs/changelog/114836.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114836 -summary: Support multi-valued fields in compute engine for ST_DISTANCE -area: ES|QL -type: enhancement -issues: - - 112910 diff --git a/docs/changelog/114848.yaml b/docs/changelog/114848.yaml deleted file mode 100644 index db41e8496f787..0000000000000 --- a/docs/changelog/114848.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114848 -summary: "ESQL: Fix grammar changes around per agg filtering" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/114854.yaml b/docs/changelog/114854.yaml deleted file mode 100644 index 144a10ba85043..0000000000000 --- a/docs/changelog/114854.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 114854 -summary: Adding deprecation warnings for rrf using rank and `sub_searches` -area: Search -type: deprecation -issues: [] -deprecation: - title: Adding deprecation warnings for rrf using rank and `sub_searches` - area: REST API - details: Search API parameter `sub_searches` will no longer be a supported and will be removed in future releases. Similarly, `rrf` can only be used through the specified `retriever` and no longer though the `rank` parameter - impact: Requests specifying rrf through `rank` and/or `sub_searches` elements will be disallowed in a future version. Users should instead utilize the new `retriever` parameter. 
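For readers following that advice, a minimal sketch of the replacement request shape may help (the index name, field names, and vector values below are hypothetical, not taken from this changelog):

[source,console]
----
POST /my-index/_search
{
  "retriever": {
    "rrf": { <1>
      "retrievers": [ <2>
        {
          "standard": {
            "query": { "match": { "title": "elasticsearch" } }
          }
        },
        {
          "knn": {
            "field": "title_embedding",
            "query_vector": [0.12, -0.45, 0.91],
            "k": 10,
            "num_candidates": 50
          }
        }
      ],
      "rank_window_size": 50,
      "rank_constant": 20
    }
  }
}
----
<1> `rrf` is expressed through the `retriever` parameter rather than the deprecated `rank` element.
<2> Each entry replaces what would previously have been a `sub_searches` element; here a lexical `standard` retriever and a `knn` retriever are fused by reciprocal rank.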
diff --git a/docs/changelog/114856.yaml b/docs/changelog/114856.yaml deleted file mode 100644 index da7fae3ee18ea..0000000000000 --- a/docs/changelog/114856.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114856 -summary: "OTel mappings: avoid metrics to be rejected when attributes are malformed" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/114888.yaml b/docs/changelog/114888.yaml deleted file mode 100644 index 6b99eb82d10f3..0000000000000 --- a/docs/changelog/114888.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114888 -summary: Fix ST_CENTROID_AGG when no records are aggregated -area: ES|QL -type: bug -issues: - - 106025 diff --git a/docs/changelog/114934.yaml b/docs/changelog/114934.yaml new file mode 100644 index 0000000000000..68628993b1c80 --- /dev/null +++ b/docs/changelog/114934.yaml @@ -0,0 +1,6 @@ +pr: 114934 +summary: "[ES|QL] To_DatePeriod and To_TimeDuration return better error messages on\ + \ `union_type` fields" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/114951.yaml b/docs/changelog/114951.yaml deleted file mode 100644 index 4d40a063e2b02..0000000000000 --- a/docs/changelog/114951.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114951 -summary: Expose cluster-state role mappings in APIs -area: Authentication -type: bug -issues: [] diff --git a/docs/changelog/114990.yaml b/docs/changelog/114990.yaml deleted file mode 100644 index 2575942d15bf5..0000000000000 --- a/docs/changelog/114990.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114990 -summary: Allow for querries on `_tier` to skip shards in the `can_match` phase -area: Search -type: bug -issues: - - 114910 diff --git a/docs/changelog/115031.yaml b/docs/changelog/115031.yaml deleted file mode 100644 index d8d6e1a3f8166..0000000000000 --- a/docs/changelog/115031.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115031 -summary: Bool query early termination should also consider `must_not` clauses -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/115048.yaml b/docs/changelog/115048.yaml deleted file mode 100644 index 10844b83c6d01..0000000000000 --- a/docs/changelog/115048.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115048 -summary: Add timeout and cancellation check to rescore phase -area: Ranking -type: enhancement -issues: [] diff --git a/docs/changelog/115061.yaml b/docs/changelog/115061.yaml deleted file mode 100644 index 7d40d5ae2629e..0000000000000 --- a/docs/changelog/115061.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115061 -summary: "[ES|QL] Simplify syntax of named parameter for identifier and pattern" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/115117.yaml b/docs/changelog/115117.yaml deleted file mode 100644 index de2defcd46afd..0000000000000 --- a/docs/changelog/115117.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115117 -summary: Report JVM stats for all memory pools (97046) -area: Infra/Core -type: bug -issues: - - 97046 diff --git a/docs/changelog/115142.yaml b/docs/changelog/115142.yaml new file mode 100644 index 0000000000000..2af968ae156da --- /dev/null +++ b/docs/changelog/115142.yaml @@ -0,0 +1,6 @@ +pr: 115142 +summary: Attempt to clean up index before remote transfer +area: Recovery +type: enhancement +issues: + - 104473 diff --git a/docs/changelog/115147.yaml b/docs/changelog/115147.yaml deleted file mode 100644 index 36f40bba1da17..0000000000000 --- a/docs/changelog/115147.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115147 -summary: Fix IPinfo geolocation schema -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/115181.yaml 
b/docs/changelog/115181.yaml deleted file mode 100644 index 65f59d5ed0add..0000000000000 --- a/docs/changelog/115181.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115181 -summary: Always check the parent breaker with zero bytes in `PreallocatedCircuitBreakerService` -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/115194.yaml b/docs/changelog/115194.yaml deleted file mode 100644 index 0b201b9f89aa5..0000000000000 --- a/docs/changelog/115194.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 115194 -summary: Update APM Java Agent to support JDK 23 -area: Infra/Metrics -type: upgrade -issues: - - 115101 - - 115100 diff --git a/docs/changelog/115245.yaml b/docs/changelog/115245.yaml deleted file mode 100644 index 294328567c3aa..0000000000000 --- a/docs/changelog/115245.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 115245 -summary: "ESQL: Fix `REVERSE` with backspace character" -area: ES|QL -type: bug -issues: - - 114372 - - 115227 - - 115228 diff --git a/docs/changelog/115308.yaml b/docs/changelog/115308.yaml deleted file mode 100644 index 163f0232a3e58..0000000000000 --- a/docs/changelog/115308.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115308 -summary: "ESQL: Disable pushdown of WHERE past STATS" -area: ES|QL -type: bug -issues: - - 115281 diff --git a/docs/changelog/115312.yaml b/docs/changelog/115312.yaml deleted file mode 100644 index acf6bbc69c36c..0000000000000 --- a/docs/changelog/115312.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115312 -summary: "ESQL: Fix filtered grouping on ords" -area: ES|QL -type: bug -issues: - - 114897 diff --git a/docs/changelog/115317.yaml b/docs/changelog/115317.yaml deleted file mode 100644 index 153f7a52f0674..0000000000000 --- a/docs/changelog/115317.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115317 -summary: Revert "Add `ResolvedExpression` wrapper" -area: Indices APIs -type: bug -issues: [] diff --git a/docs/changelog/115399.yaml b/docs/changelog/115399.yaml deleted file mode 100644 index 9f69657a5d167..0000000000000 --- a/docs/changelog/115399.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 115399 -summary: Adding breaking change entry for retrievers -area: Search -type: breaking -issues: [] -breaking: - title: Reworking RRF retriever to be evaluated during rewrite phase - area: REST API - details: |- - In this release (8.16), we have introduced major changes to the retrievers framework - and how they can be evaluated, focusing mainly on compound retrievers - like `rrf` and `text_similarity_reranker`, which allowed us to support full - composability (i.e. any retriever can be nested under any compound retriever), - as well as supporting additional search features like collapsing, explaining, - aggregations, and highlighting. - - To ensure consistency, and given that this rework is not available until 8.16, - `rrf` and `text_similarity_reranker` retriever queries would now - throw an exception in a mixed cluster scenario, where there are nodes - both in current or later (i.e. >= 8.16) and previous ( <= 8.15) versions. - - As part of the rework, we have also removed the `_rank` property from - the responses of an `rrf` retriever. - impact: |- - - Users will not be able to use the `rrf` and `text_similarity_reranker` retrievers in a mixed cluster scenario - with previous releases (i.e. prior to 8.16), and the request will throw an `IllegalArgumentException`. 
- - `_rank` has now been removed from the output of the `rrf` retrievers so trying to directly parse the field - will throw an exception - notable: false diff --git a/docs/changelog/115404.yaml b/docs/changelog/115404.yaml deleted file mode 100644 index e443b152955f3..0000000000000 --- a/docs/changelog/115404.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115404 -summary: Fix NPE in Get Deployment Stats -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115429.yaml b/docs/changelog/115429.yaml deleted file mode 100644 index ddf3c69183000..0000000000000 --- a/docs/changelog/115429.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115429 -summary: "[otel-data] Add more kubernetes aliases" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/115430.yaml b/docs/changelog/115430.yaml deleted file mode 100644 index c2903f7751012..0000000000000 --- a/docs/changelog/115430.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115430 -summary: Prevent NPE if model assignment is removed while waiting to start -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115459.yaml b/docs/changelog/115459.yaml deleted file mode 100644 index b20a8f765c084..0000000000000 --- a/docs/changelog/115459.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115459 -summary: Guard blob store local directory creation with `doPrivileged` -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/115510.yaml b/docs/changelog/115510.yaml deleted file mode 100644 index 1e71270e18f97..0000000000000 --- a/docs/changelog/115510.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115510 -summary: Fix lingering license warning header in IP filter -area: License -type: bug -issues: - - 114865 diff --git a/docs/changelog/115594.yaml b/docs/changelog/115594.yaml deleted file mode 100644 index 91a6089dfb3ce..0000000000000 --- a/docs/changelog/115594.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115594 -summary: Update `BlobCacheBufferedIndexInput::readVLong` to correctly handle negative - long values -area: Search -type: bug -issues: [] diff --git a/docs/changelog/115624.yaml b/docs/changelog/115624.yaml deleted file mode 100644 index 1992ed65679ca..0000000000000 --- a/docs/changelog/115624.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 115624 -summary: "ES|QL: fix LIMIT pushdown past MV_EXPAND" -area: ES|QL -type: bug -issues: - - 102084 - - 102061 diff --git a/docs/changelog/115656.yaml b/docs/changelog/115656.yaml deleted file mode 100644 index 13b612b052fc1..0000000000000 --- a/docs/changelog/115656.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115656 -summary: Fix stream support for `TaskType.ANY` -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115678.yaml b/docs/changelog/115678.yaml new file mode 100644 index 0000000000000..31240eae1ebb4 --- /dev/null +++ b/docs/changelog/115678.yaml @@ -0,0 +1,5 @@ +pr: 115678 +summary: "ESQL: extract common filter from aggs" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/115687.yaml b/docs/changelog/115687.yaml new file mode 100644 index 0000000000000..1180b4627c635 --- /dev/null +++ b/docs/changelog/115687.yaml @@ -0,0 +1,5 @@ +pr: 115687 +summary: Add default ILM policies and switch to ILM for apm-data plugin +area: Data streams +type: feature +issues: [] diff --git a/docs/changelog/115715.yaml b/docs/changelog/115715.yaml deleted file mode 100644 index 378f2c42e5e50..0000000000000 --- a/docs/changelog/115715.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115715 -summary: Avoid `catch (Throwable t)` in `AmazonBedrockStreamingChatProcessor` 
-area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115811.yaml b/docs/changelog/115811.yaml deleted file mode 100644 index 292dc91ecb928..0000000000000 --- a/docs/changelog/115811.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115811 -summary: "Prohibit changes to index mode, source, and sort settings during restore" -area: Logs -type: bug -issues: [] diff --git a/docs/changelog/115814.yaml b/docs/changelog/115814.yaml new file mode 100644 index 0000000000000..34f1213272d6f --- /dev/null +++ b/docs/changelog/115814.yaml @@ -0,0 +1,6 @@ +pr: 115814 +summary: "[ES|QL] Implicit casting string literal to intervals" +area: ES|QL +type: enhancement +issues: + - 115352 diff --git a/docs/changelog/115823.yaml b/docs/changelog/115823.yaml deleted file mode 100644 index a6119e0fa56e4..0000000000000 --- a/docs/changelog/115823.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115823 -summary: Add ECK Role Mapping Cleanup -area: Security -type: bug -issues: [] diff --git a/docs/changelog/115834.yaml b/docs/changelog/115834.yaml deleted file mode 100644 index 91f9e9a4e2e41..0000000000000 --- a/docs/changelog/115834.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115834 -summary: Try to simplify geometries that fail with `TopologyException` -area: Geo -type: bug -issues: [] diff --git a/docs/changelog/115858.yaml b/docs/changelog/115858.yaml new file mode 100644 index 0000000000000..0c0408fa656f8 --- /dev/null +++ b/docs/changelog/115858.yaml @@ -0,0 +1,5 @@ +pr: 115858 +summary: "ESQL: optimise aggregations filtered by false/null into evals" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/115868.yaml b/docs/changelog/115868.yaml deleted file mode 100644 index abe6a63c3a4d8..0000000000000 --- a/docs/changelog/115868.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115868 -summary: Forward bedrock connection errors to user -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115930.yaml b/docs/changelog/115930.yaml new file mode 100644 index 0000000000000..788a01b5cac96 --- /dev/null +++ b/docs/changelog/115930.yaml @@ -0,0 +1,5 @@ +pr: 115930 +summary: Inconsistency in the _analyzer api when the index is not included +area: Search +type: bug +issues: [] diff --git a/docs/changelog/115952.yaml b/docs/changelog/115952.yaml deleted file mode 100644 index ec57a639dc0ae..0000000000000 --- a/docs/changelog/115952.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115952 -summary: "ESQL: Fix a bug in VALUES agg" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/116015.yaml b/docs/changelog/116015.yaml deleted file mode 100644 index 693fad639f2fa..0000000000000 --- a/docs/changelog/116015.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116015 -summary: Empty percentile results no longer throw no_such_element_exception in Anomaly Detection jobs -area: Machine Learning -type: bug -issues: - - 116013 diff --git a/docs/changelog/116031.yaml b/docs/changelog/116031.yaml deleted file mode 100644 index e30552bf3b513..0000000000000 --- a/docs/changelog/116031.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116031 -summary: Resolve pipelines from template on lazy rollover write -area: Data streams -type: bug -issues: - - 112781 diff --git a/docs/changelog/116077.yaml b/docs/changelog/116077.yaml new file mode 100644 index 0000000000000..7c499c9b7acf4 --- /dev/null +++ b/docs/changelog/116077.yaml @@ -0,0 +1,14 @@ +pr: 116077 +summary: Remove `ecs` option on `user_agent` processor +area: Ingest Node +type: breaking +issues: [] +breaking: + title: Remove `ecs` option on `user_agent` processor + 
area: Ingest + details: >- + The `user_agent` ingest processor no longer accepts the `ecs` option. (It was previously deprecated and ignored.) + impact: >- + Users should stop using the `ecs` option when creating instances of the `user_agent` ingest processor. + The option will be removed from existing processors stored in the cluster state on upgrade. + notable: false diff --git a/docs/changelog/116086.yaml b/docs/changelog/116086.yaml deleted file mode 100644 index 73ad77d637a46..0000000000000 --- a/docs/changelog/116086.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116086 -summary: "ESQL: Fix DEBUG log of filter" -area: ES|QL -type: bug -issues: - - 116055 diff --git a/docs/changelog/116212.yaml b/docs/changelog/116212.yaml deleted file mode 100644 index 7c8756f4054cd..0000000000000 --- a/docs/changelog/116212.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116212 -summary: Handle status code 0 in S3 CMU response -area: Snapshot/Restore -type: bug -issues: - - 102294 diff --git a/docs/changelog/116219.yaml b/docs/changelog/116219.yaml deleted file mode 100644 index aeeea68570e77..0000000000000 --- a/docs/changelog/116219.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116219 -summary: "[apm-data] Apply lazy rollover on index template creation" -area: Data streams -type: bug -issues: - - 116230 diff --git a/docs/changelog/116266.yaml b/docs/changelog/116266.yaml deleted file mode 100644 index 1fcc0c310962d..0000000000000 --- a/docs/changelog/116266.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116266 -summary: Align dot prefix validation with Serverless -area: Indices APIs -type: bug -issues: [] diff --git a/docs/changelog/116274.yaml b/docs/changelog/116274.yaml deleted file mode 100644 index 9d506c7725afd..0000000000000 --- a/docs/changelog/116274.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116274 -summary: "[ES|QL] Verify aggregation filter's type is boolean to avoid `class_cast_exception`" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/116339.yaml b/docs/changelog/116339.yaml new file mode 100644 index 0000000000000..1767183271812 --- /dev/null +++ b/docs/changelog/116339.yaml @@ -0,0 +1,5 @@ +pr: 116339 +summary: "Index stats enhancement: creation date and `tier_preference`" +area: Stats +type: feature +issues: [] diff --git a/docs/changelog/116348.yaml b/docs/changelog/116348.yaml new file mode 100644 index 0000000000000..927ffc5a6121d --- /dev/null +++ b/docs/changelog/116348.yaml @@ -0,0 +1,5 @@ +pr: 116348 +summary: "ESQL: Honor skip_unavailable setting for nonmatching indices errors at planning time" +area: ES|QL +type: enhancement +issues: [ 114531 ] diff --git a/docs/changelog/116357.yaml b/docs/changelog/116357.yaml new file mode 100644 index 0000000000000..a1a7831eab9ca --- /dev/null +++ b/docs/changelog/116357.yaml @@ -0,0 +1,5 @@ +pr: 116357 +summary: Add tracking for query rule types +area: Relevance +type: enhancement +issues: [] diff --git a/docs/changelog/116408.yaml b/docs/changelog/116408.yaml new file mode 100644 index 0000000000000..5f4c8459778a6 --- /dev/null +++ b/docs/changelog/116408.yaml @@ -0,0 +1,6 @@ +pr: 116408 +summary: Propagating nested `inner_hits` to the parent compound retriever +area: Ranking +type: bug +issues: + - 116397 diff --git a/docs/changelog/116431.yaml b/docs/changelog/116431.yaml new file mode 100644 index 0000000000000..50c6baf1d01c7 --- /dev/null +++ b/docs/changelog/116431.yaml @@ -0,0 +1,5 @@ +pr: 116431 +summary: Adds support for `input_type` field to Vertex inference service +area: Machine Learning +type: enhancement +issues: [] diff --git 
a/docs/changelog/116583.yaml b/docs/changelog/116583.yaml new file mode 100644 index 0000000000000..3dc8337fe5b86 --- /dev/null +++ b/docs/changelog/116583.yaml @@ -0,0 +1,7 @@ +pr: 116583 +summary: Fix NPE in `EnrichLookupService` on mixed clusters with <8.14 versions +area: ES|QL +type: bug +issues: + - 116529 + - 116544 diff --git a/docs/changelog/116591.yaml b/docs/changelog/116591.yaml new file mode 100644 index 0000000000000..60ef241e197b3 --- /dev/null +++ b/docs/changelog/116591.yaml @@ -0,0 +1,5 @@ +pr: 116591 +summary: "Add support for `BYTE_LENGTH` scalar function" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/116650.yaml b/docs/changelog/116650.yaml new file mode 100644 index 0000000000000..d314a918aede9 --- /dev/null +++ b/docs/changelog/116650.yaml @@ -0,0 +1,5 @@ +pr: 116650 +summary: Fix bug in ML autoscaling when some node info is unavailable +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/116656.yaml b/docs/changelog/116656.yaml new file mode 100644 index 0000000000000..eb5d5a1cfc201 --- /dev/null +++ b/docs/changelog/116656.yaml @@ -0,0 +1,6 @@ +pr: 116656 +summary: _validate does not honour ignore_unavailable +area: Search +type: bug +issues: + - 116594 diff --git a/docs/changelog/116664.yaml b/docs/changelog/116664.yaml new file mode 100644 index 0000000000000..36915fca39731 --- /dev/null +++ b/docs/changelog/116664.yaml @@ -0,0 +1,6 @@ +pr: 116664 +summary: Hides `hugging_face_elser` service from the `GET _inference/_services API` +area: Machine Learning +type: bug +issues: + - 116644 diff --git a/docs/changelog/116676.yaml b/docs/changelog/116676.yaml new file mode 100644 index 0000000000000..8c6671e177499 --- /dev/null +++ b/docs/changelog/116676.yaml @@ -0,0 +1,5 @@ +pr: 116676 +summary: Fix handling of time exceeded exception in fetch phase +area: Search +type: bug +issues: [] diff --git a/docs/changelog/116922.yaml b/docs/changelog/116922.yaml new file mode 100644 index 0000000000000..39e63da50ea24 --- /dev/null +++ b/docs/changelog/116922.yaml @@ -0,0 +1,5 @@ +pr: 116922 +summary: Always check if index mode is logsdb +area: Logs +type: bug +issues: [] diff --git a/docs/reference/connector/docs/_connectors-overview-table.asciidoc b/docs/reference/connector/docs/_connectors-overview-table.asciidoc index f25ea3deceeee..f5f8103349dde 100644 --- a/docs/reference/connector/docs/_connectors-overview-table.asciidoc +++ b/docs/reference/connector/docs/_connectors-overview-table.asciidoc @@ -44,7 +44,7 @@ NOTE: All connectors are available as self-managed <>|*GA*|8.12+|8.12+|8.11+|8.13+|8.13+|https://github.com/elastic/connectors/tree/main/connectors/sources/salesforce.py[View code] |<>|*GA*|8.10+|8.10+|8.11+|8.13+|8.13+|https://github.com/elastic/connectors/tree/main/connectors/sources/servicenow.py[View code] |<>|*GA*|8.9+|8.9+|8.9+|8.9+|8.9+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_online.py[View code] -|<>|*Beta*|8.15+|-|8.11+|8.13+|8.14+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_server.py[View code] +|<>|*Beta*|8.15+|-|8.11+|8.13+|8.15+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_server.py[View code] |<>|*Preview*|8.14+|-|-|-|-|https://github.com/elastic/connectors/tree/main/connectors/sources/slack.py[View code] |<>|*Preview*|8.14+|-|-|8.13+|-|https://github.com/elastic/connectors/tree/main/connectors/sources/teams.py[View code] 
|<>|*Preview*|8.14+|-|8.11+|8.13+|-|https://github.com/elastic/connectors/tree/main/connectors/sources/zoom.py[View code] diff --git a/docs/reference/connector/docs/connectors-release-notes.asciidoc b/docs/reference/connector/docs/connectors-release-notes.asciidoc index 723671b049bf2..e1ed082365c00 100644 --- a/docs/reference/connector/docs/connectors-release-notes.asciidoc +++ b/docs/reference/connector/docs/connectors-release-notes.asciidoc @@ -4,7 +4,13 @@ Release notes ++++ -[INFO] +[NOTE] ==== -Prior to version 8.16.0, the connector release notes were published as part of the https://www.elastic.co/guide/en/enterprise-search/current/changelog.html[Enterprise Search documentation]. +Prior to version *8.16.0*, the connector release notes were published as part of the {enterprise-search-ref}/changelog.html[Enterprise Search documentation]. ==== + +*Release notes*: + +* <> + +include::release-notes/connectors-release-notes-8.16.0.asciidoc[] diff --git a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc index 95ff8223b4d20..21d0890e436c5 100644 --- a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc @@ -87,14 +87,16 @@ Select an expiration date. (At this expiration date, you will need to generate a + ``` Graph API -- Sites.Read.All +- Sites.Selected - Files.Read.All - Group.Read.All - User.Read.All Sharepoint -- Sites.Read.All +- Sites.Selected ``` +NOTE: If the `Comma-separated list of sites` configuration is set to `*` or if a user enables the toggle button `Enumerate all sites`, the connector requires `Sites.Read.All` permission. + * **Grant admin consent**, using the `Grant Admin Consent` link from the permissions screen. * Save the tenant name (i.e. Domain name) of Azure platform. @@ -138,7 +140,7 @@ Refer to https://learn.microsoft.com/en-us/sharepoint/dev/general-development/ho Here's a summary of why we use these Graph API permissions: -* *Sites.Read.All* is used to fetch the sites and their metadata +* *Sites.Selected* is used to fetch the sites and their metadata * *Files.Read.All* is used to fetch Site Drives and files in these drives * *Groups.Read.All* is used to fetch groups for document-level permissions * *User.Read.All* is used to fetch user information for document-level permissions @@ -546,14 +548,16 @@ Select an expiration date. (At this expiration date, you will need to generate a + ``` Graph API -- Sites.Read.All +- Sites.Selected - Files.Read.All - Group.Read.All - User.Read.All Sharepoint -- Sites.Read.All +- Sites.Selected ``` +NOTE: If the `Comma-separated list of sites` configuration is set to `*` or if a user enables the toggle button `Enumerate all sites`, the connector requires `Sites.Read.All` permission. + * **Grant admin consent**, using the `Grant Admin Consent` link from the permissions screen. * Save the tenant name (i.e. Domain name) of Azure platform. 
@@ -597,7 +601,7 @@ Refer to https://learn.microsoft.com/en-us/sharepoint/dev/general-development/ho Here's a summary of why we use these Graph API permissions: -* *Sites.Read.All* is used to fetch the sites and their metadata +* *Sites.Selected* is used to fetch the sites and their metadata * *Files.Read.All* is used to fetch Site Drives and files in these drives * *Groups.Read.All* is used to fetch groups for document-level permissions * *User.Read.All* is used to fetch user information for document-level permissions diff --git a/docs/reference/connector/docs/connectors-sharepoint.asciidoc b/docs/reference/connector/docs/connectors-sharepoint.asciidoc index f5590daa1e701..d7a2307a9db80 100644 --- a/docs/reference/connector/docs/connectors-sharepoint.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint.asciidoc @@ -67,6 +67,9 @@ The following SharePoint Server versions are compatible: The following configuration fields are required to set up the connector: +`authentication`:: +Authentication mode, either *Basic* or *NTLM*. + `username`:: The username of the account for the SharePoint Server instance. @@ -133,7 +136,7 @@ The connector syncs the following SharePoint object types: [NOTE] ==== * Content from files bigger than 10 MB won't be extracted by default. Use the <> to handle larger binary files. -* Permissions are not synced. **All documents** indexed to an Elastic deployment will be visible to **all users with access** to that Elasticsearch Index. +* Permissions are not synced by default. Enable <> to sync permissions. ==== [discrete#es-connectors-sharepoint-sync-types] @@ -191,7 +194,7 @@ This connector is written in Python using the {connectors-python}[Elastic connec View the {connectors-python}/connectors/sources/sharepoint_server.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== @@ -254,6 +257,9 @@ Once connected, you'll be able to update these values in Kibana. The following configuration fields are required to set up the connector: +`authentication`:: +Authentication mode, either *Basic* or *NTLM*. + `username`:: The username of the account for the SharePoint Server instance. @@ -408,5 +414,5 @@ This connector is written in Python using the {connectors-python}[Elastic connec View the {connectors-python}/connectors/sources/sharepoint_server.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== diff --git a/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc b/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc new file mode 100644 index 0000000000000..7608336073176 --- /dev/null +++ b/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc @@ -0,0 +1,53 @@ +[[es-connectors-release-notes-8-16-0]] +=== 8.16.0 connectors release notes + +[discrete] +[[es-connectors-release-notes-deprecation-notice]] +==== Deprecation notices + +* *Direct index access for connectors and sync jobs* ++ +IMPORTANT: Directly accessing connector and sync job state through `.elastic-connectors*` indices is deprecated, and will be disallowed entirely in a future release. + +* Instead, the Elasticsearch Connector APIs should be used. Connectors framework code now uses the <> by default. 
+See https://github.com/elastic/connectors/pull/2884[*PR 2902*]. + +* *Docker `enterprise-search` namespace deprecation* ++ +IMPORTANT: The `enterprise-search` Docker namespace is deprecated and will be discontinued in a future release. ++ +Starting in `8.16.0`, Docker images are being transitioned to the new `integrations` namespace, which will become the sole location for future releases. This affects the https://github.com/elastic/connectors[Elastic Connectors] and https://github.com/elastic/data-extraction-service[Elastic Data Extraction Service]. ++ +During this transition period, images are published to both namespaces: ++ +** *Example*: ++ +Deprecated namespace:: +`docker.elastic.co/enterprise-search/elastic-connectors:v8.16.0` ++ +New namespace:: +`docker.elastic.co/integrations/elastic-connectors:v8.16.0` ++ +Users should migrate to the new `integrations` namespace as soon as possible to ensure continued access to future releases. + +[discrete] +[[es-connectors-release-notes-8-16-0-enhancements]] +==== Enhancements + +* Docker images now use Chainguard's Wolfi base image (`docker.elastic.co/wolfi/jdk:openjdk-11-dev`), replacing the previous `ubuntu:focal` base. + +* The Sharepoint Online connector now works with the `Sites.Selected` permission instead of the broader permission `Sites.Read.All`. +See https://github.com/elastic/connectors/pull/2762[*PR 2762*]. + +* Starting in 8.16.0, connectors will start using proper SEMVER, with `MAJOR.MINOR.PATCH`, which aligns with Elasticsearch/Kibana versions. This drops the previous `.BUILD` suffix, which we used to release connectors between Elastic stack releases. Going forward, these inter-stack-release releases will be suffixed instead with `+`, aligning with Elastic Agent and conforming to SEMVER. +See https://github.com/elastic/connectors/pull/2749[*PR 2749*]. + +* Connector logs now use UTC timestamps, instead of machine-local timestamps. This only impacts logging output. +See https://github.com/elastic/connectors/pull/2695[*PR 2695*]. + +[discrete] +[[es-connectors-release-notes-8-16-0-bug-fixes]] +==== Bug fixes + +* The Dropbox connector now fetches the files from team shared folders. +See https://github.com/elastic/connectors/pull/2718[*PR 2718*]. \ No newline at end of file diff --git a/docs/reference/connector/docs/sync-rules.asciidoc b/docs/reference/connector/docs/sync-rules.asciidoc index 9b2a77be7db03..3ab72093666b8 100644 --- a/docs/reference/connector/docs/sync-rules.asciidoc +++ b/docs/reference/connector/docs/sync-rules.asciidoc @@ -116,6 +116,12 @@ A "match" is determined based on a condition defined by a combination of "field" The `Field` column should be used to define which field on a given document should be considered. +[NOTE] +==== +Only top-level fields are supported. +Nested/object fields cannot be referenced with "dot notation". +==== + The following rules are available in the `Rule` column: * `equals` - The field value is equal to the specified value. diff --git a/docs/reference/esql/functions/description/bit_length.asciidoc b/docs/reference/esql/functions/description/bit_length.asciidoc index 1aad47488802d..3a3dd80d2bb0f 100644 --- a/docs/reference/esql/functions/description/bit_length.asciidoc +++ b/docs/reference/esql/functions/description/bit_length.asciidoc @@ -3,3 +3,5 @@ *Description* Returns the bit length of a string. + +NOTE: All strings are in UTF-8, so a single character can use multiple bytes. 
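The UTF-8 note these descriptions add is easiest to see with a multi-byte character. A quick illustrative query against the ES|QL query API (assuming a release that ships `BYTE_LENGTH` and `BIT_LENGTH`; the `ROW` input is arbitrary):

[source,console]
----
POST /_query
{
  "query": """
    ROW s = "café"
    | EVAL chars = LENGTH(s), bytes = BYTE_LENGTH(s), bits = BIT_LENGTH(s)
  """
}
----

Because `é` occupies two bytes in UTF-8, this should return `chars = 4`, `bytes = 5`, and `bits = 40`.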
diff --git a/docs/reference/esql/functions/description/byte_length.asciidoc b/docs/reference/esql/functions/description/byte_length.asciidoc new file mode 100644 index 0000000000000..c2150806e09ac --- /dev/null +++ b/docs/reference/esql/functions/description/byte_length.asciidoc @@ -0,0 +1,7 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Returns the byte length of a string. + +NOTE: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/description/length.asciidoc b/docs/reference/esql/functions/description/length.asciidoc index bf976e3d6e507..91525fda0c086 100644 --- a/docs/reference/esql/functions/description/length.asciidoc +++ b/docs/reference/esql/functions/description/length.asciidoc @@ -3,3 +3,5 @@ *Description* Returns the character length of a string. + +NOTE: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/examples/byte_length.asciidoc b/docs/reference/esql/functions/examples/byte_length.asciidoc new file mode 100644 index 0000000000000..d6b557fcd2e76 --- /dev/null +++ b/docs/reference/esql/functions/examples/byte_length.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/eval.csv-spec[tag=byteLength] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/eval.csv-spec[tag=byteLength-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/bit_length.json b/docs/reference/esql/functions/kibana/definition/bit_length.json new file mode 100644 index 0000000000000..0c75b76cdbbfb --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/bit_length.json @@ -0,0 +1,38 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "bit_length", + "description" : "Returns the bit length of a string.", + "note" : "All strings are in UTF-8, so a single character can use multiple bytes.", + "signatures" : [ + { + "params" : [ + { + "name" : "string", + "type" : "keyword", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "string", + "type" : "text", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "integer" + } + ], + "examples" : [ + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length = LENGTH(city), fn_bit_length = BIT_LENGTH(city)" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/byte_length.json b/docs/reference/esql/functions/kibana/definition/byte_length.json new file mode 100644 index 0000000000000..60f439b9d8133 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/byte_length.json @@ -0,0 +1,38 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "byte_length", + "description" : "Returns the byte length of a string.", + "note" : "All strings are in UTF-8, so a single character can use multiple bytes.", + "signatures" : [ + { + "params" : [ + { + "name" : "string", + "type" : "keyword", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "string", + "type" : "text", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "integer" + } + ], + "examples" : [ + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length = LENGTH(city), fn_byte_length = BYTE_LENGTH(city)" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/length.json b/docs/reference/esql/functions/kibana/definition/length.json index 0da505cf5ffa7..bc26acde744f5 100644 --- a/docs/reference/esql/functions/kibana/definition/length.json +++ b/docs/reference/esql/functions/kibana/definition/length.json @@ -3,6 +3,7 @@ "type" : "eval", "name" : "length", "description" : "Returns the character length of a string.", + "note" : "All strings are in UTF-8, so a single character can use multiple bytes.", "signatures" : [ { "params" : [ @@ -30,7 +31,7 @@ } ], "examples" : [ - "FROM employees\n| KEEP first_name, last_name\n| EVAL fn_length = LENGTH(first_name)" + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length = LENGTH(city)" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/docs/bit_length.md b/docs/reference/esql/functions/kibana/docs/bit_length.md index 22280febd7876..b1d8e24c4de76 100644 --- a/docs/reference/esql/functions/kibana/docs/bit_length.md +++ b/docs/reference/esql/functions/kibana/docs/bit_length.md @@ -6,7 +6,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ Returns the bit length of a string. ``` -FROM employees -| KEEP first_name, last_name -| EVAL fn_bit_length = BIT_LENGTH(first_name) +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length = LENGTH(city), fn_bit_length = BIT_LENGTH(city) ``` +Note: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/kibana/docs/byte_length.md b/docs/reference/esql/functions/kibana/docs/byte_length.md new file mode 100644 index 0000000000000..9cd4f87c9883b --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/byte_length.md @@ -0,0 +1,14 @@ + + +### BYTE_LENGTH +Returns the byte length of a string. + +``` +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length = LENGTH(city), fn_byte_length = BYTE_LENGTH(city) +``` +Note: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/kibana/docs/length.md b/docs/reference/esql/functions/kibana/docs/length.md index 19e3533e0ddfb..aed76ee14cedb 100644 --- a/docs/reference/esql/functions/kibana/docs/length.md +++ b/docs/reference/esql/functions/kibana/docs/length.md @@ -6,7 +6,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ Returns the character length of a string. 
``` -FROM employees -| KEEP first_name, last_name -| EVAL fn_length = LENGTH(first_name) +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length = LENGTH(city) ``` +Note: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/layout/byte_length.asciidoc b/docs/reference/esql/functions/layout/byte_length.asciidoc new file mode 100644 index 0000000000000..56dc341264e0f --- /dev/null +++ b/docs/reference/esql/functions/layout/byte_length.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-byte_length]] +=== `BYTE_LENGTH` + +*Syntax* + +[.text-center] +image::esql/functions/signature/byte_length.svg[Embedded,opts=inline] + +include::../parameters/byte_length.asciidoc[] +include::../description/byte_length.asciidoc[] +include::../types/byte_length.asciidoc[] +include::../examples/byte_length.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/byte_length.asciidoc b/docs/reference/esql/functions/parameters/byte_length.asciidoc new file mode 100644 index 0000000000000..7bb8c080ce4a1 --- /dev/null +++ b/docs/reference/esql/functions/parameters/byte_length.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`string`:: +String expression. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/signature/byte_length.svg b/docs/reference/esql/functions/signature/byte_length.svg new file mode 100644 index 0000000000000..d88821e46e926 --- /dev/null +++ b/docs/reference/esql/functions/signature/byte_length.svg @@ -0,0 +1 @@ +BYTE_LENGTH(string) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc b/docs/reference/esql/functions/string-functions.asciidoc index 422860f0a7a1d..ce9636f5c5a3a 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -9,6 +9,7 @@ // tag::string_list[] * <> +* <> * <> * <> * <> @@ -32,6 +33,7 @@ // end::string_list[] include::layout/bit_length.asciidoc[] +include::layout/byte_length.asciidoc[] include::layout/concat.asciidoc[] include::layout/ends_with.asciidoc[] include::layout/from_base64.asciidoc[] diff --git a/docs/reference/esql/functions/types/byte_length.asciidoc b/docs/reference/esql/functions/types/byte_length.asciidoc new file mode 100644 index 0000000000000..db5a48c7c4390 --- /dev/null +++ b/docs/reference/esql/functions/types/byte_length.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +string | result +keyword | integer +text | integer +|=== diff --git a/docs/reference/esql/implicit-casting.asciidoc b/docs/reference/esql/implicit-casting.asciidoc index f0c0aa3d82063..ffb6d3fc35acb 100644 --- a/docs/reference/esql/implicit-casting.asciidoc +++ b/docs/reference/esql/implicit-casting.asciidoc @@ -5,7 +5,7 @@ Implicit casting ++++ -Often users will input `datetime`, `ip`, `version`, or geospatial objects as simple strings in their queries for use in predicates, functions, or expressions. {esql} provides <> to explicitly convert these strings into the desired data types. 
+Often users will input `date`, `ip`, `version`, `date_period`, or `time_duration` as simple strings in their queries for use in predicates, functions, or expressions. {esql} provides <> to explicitly convert these strings into the desired data types. Without implicit casting users must explicitly code these `to_X` functions in their queries, when string literals don't match the target data types they are assigned or compared to. Here is an example of using `to_datetime` to explicitly perform a data type conversion. @@ -18,7 +18,7 @@ FROM employees | LIMIT 1 ---- -Implicit casting improves usability, by automatically converting string literals to the target data type. This is most useful when the target data type is `datetime`, `ip`, `version` or a geo spatial. It is natural to specify these as a string in queries. +Implicit casting improves usability by automatically converting string literals to the target data type. This is most useful when the target data type is `date`, `ip`, `version`, `date_period`, or `time_duration`. It is natural to specify these as a string in queries. The first query can be coded without calling the `to_datetime` function, as follows: @@ -38,16 +38,28 @@ The following table details which {esql} operations support implicit casting for [%header.monospaced.styled,format=dsv,separator=|] |=== -||ScalarFunction|BinaryComparison|ArithmeticOperation|InListPredicate|AggregateFunction -|DATETIME|Y|Y|Y|Y|N -|DOUBLE|Y|N|N|N|N -|LONG|Y|N|N|N|N -|INTEGER|Y|N|N|N|N -|IP|Y|Y|Y|Y|N -|VERSION|Y|Y|Y|Y|N -|GEO_POINT|Y|N|N|N|N -|GEO_SHAPE|Y|N|N|N|N -|CARTESIAN_POINT|Y|N|N|N|N -|CARTESIAN_SHAPE|Y|N|N|N|N -|BOOLEAN|Y|Y|Y|Y|N +||ScalarFunction*|Operator*|<>|<> +|DATE|Y|Y|Y|N +|IP|Y|Y|Y|N +|VERSION|Y|Y|Y|N +|BOOLEAN|Y|Y|Y|N +|DATE_PERIOD/TIME_DURATION|Y|N|Y|N |=== + +ScalarFunction* includes: + +<> + +<> + +<> + + +Operator* includes: + +<> + +<> + +<> + diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index e884c01dd3509..60c32cabdb5c1 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -95,13 +95,20 @@ and https://elasticsearch-benchmarks.elastic.co/#tracks/dense_vector[here] some of datasets and configurations that we use for our nightly benchmarks. [discrete] +[[dense-vector-preloading]] include::search-speed.asciidoc[tag=warm-fs-cache] The following file extensions are used for the approximate kNN search: +Each extension is broken down by quantization type. -* `vec` and `veq` for vector values -* `vex` for HNSW graph -* `vem`, `vemf`, and `vemq` for metadata +* `vex` for the HNSW graph +* `vec` for all non-quantized vector values. This includes all element types: `float`, `byte`, and `bit`. +* `veq` for quantized vectors indexed with <>: `int4` or `int8` +* `veb` for binary vectors indexed with <>: `bbq` +* `vem`, `vemf`, `vemq`, and `vemb` for metadata, usually small and not a concern for preloading + +Generally, if you are using a quantized index, you should only preload the relevant quantized values and the HNSW graph. +Preloading the raw vectors is not necessary and might be counterproductive.
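As a concrete sketch of that guidance (the index name and quantization choice are hypothetical): an index using `int8` quantization would preload only the quantized values and the HNSW graph, leaving the raw vectors out:

[source,console]
----
PUT /my-knn-index
{
  "settings": {
    "index.store.preload": ["veq", "vex"] <1>
  }
}
----
<1> `veq` holds the quantized vectors and `vex` the HNSW graph; per the advice above, the raw `vec` files are deliberately not preloaded.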
[discrete] === Reduce the number of index segments diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index 9b30ba9dbde35..aba0850c76437 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -143,8 +143,8 @@ terms dictionaries, postings lists and points, which are the most important parts of the index for search and aggregations. For vector search, you use <>, -you might want to set the setting to vector search files: `["vec", "vex", "vem"]` -("vec" is used for vector values, "vex" – for HNSW graph, "vem" – for metadata). +you might want to set the setting to vector search files. See <> for a detailed +list of the files. Note that this setting can be dangerous on indices that are larger than the size of the main memory of the host, as it would cause the filesystem cache to be diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 273d743e47a4b..262bdfbca002f 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -7,6 +7,12 @@ You can also deploy ELSER by using the <>. NOTE: The API request will automatically download and deploy the ELSER model if it isn't already downloaded. +[WARNING] +.Deprecated in 8.16 +==== +The `elser` service is deprecated and will be removed in a future release. +Use the <> instead, with `model_id` included in the `service_settings`. +==== [discrete] [[infer-service-elser-api-request]] ==== {api-request-title} @@ -173,4 +179,4 @@ PUT _inference/sparse_embedding/my-elser-model } } ------------------------------------------------------------ -// TEST[skip:TBD] \ No newline at end of file +// TEST[skip:TBD] diff --git a/docs/reference/ingest/apis/delete-geoip-database.asciidoc b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc similarity index 52% rename from docs/reference/ingest/apis/delete-geoip-database.asciidoc rename to docs/reference/ingest/apis/delete-ip-location-database.asciidoc index 957e59f0f0de4..c3a10a914d2f4 100644 --- a/docs/reference/ingest/apis/delete-geoip-database.asciidoc +++ b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc @@ -1,30 +1,30 @@ -[[delete-geoip-database-api]] -=== Delete geoip database configuration API +[[delete-ip-location-database-api]] +=== Delete IP geolocation database configuration API ++++ -Delete geoip database configuration +Delete IP geolocation database configuration ++++ -Deletes a geoip database configuration. +Deletes an IP geolocation database configuration. [source,console] ---- -DELETE /_ingest/geoip/database/my-database-id +DELETE /_ingest/ip_location/database/my-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] -[[delete-geoip-database-api-request]] +[[delete-ip-location-database-api-request]] ==== {api-request-title} -`DELETE /_ingest/geoip/database/` +`DELETE /_ingest/ip_location/database/` -[[delete-geoip-database-api-prereqs]] +[[delete-ip-location-database-api-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have the `manage` <> to use this API.
-[[delete-geoip-database-api-path-params]] +[[delete-ip-location-database-api-path-params]] ==== {api-path-parms-title} ``:: @@ -35,21 +35,21 @@ DELETE /_ingest/geoip/database/my-database-id -- -[[delete-geoip-database-api-query-params]] +[[delete-ip-location-database-api-query-params]] ==== {api-query-parms-title} include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] -[[delete-geoip-database-api-example]] +[[delete-ip-location-database-api-example]] ==== {api-examples-title} -[[delete-geoip-database-api-specific-ex]] -===== Delete a specific geoip database configuration +[[delete-ip-location-database-api-specific-ex]] +===== Delete a specific IP geolocation database configuration [source,console] ---- -DELETE /_ingest/geoip/database/example-database-id +DELETE /_ingest/ip_location/database/example-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] diff --git a/docs/reference/ingest/apis/get-geoip-database.asciidoc b/docs/reference/ingest/apis/get-ip-location-database.asciidoc similarity index 65% rename from docs/reference/ingest/apis/get-geoip-database.asciidoc rename to docs/reference/ingest/apis/get-ip-location-database.asciidoc index f055e3e759db8..26e9ddc1eee50 100644 --- a/docs/reference/ingest/apis/get-geoip-database.asciidoc +++ b/docs/reference/ingest/apis/get-ip-location-database.asciidoc @@ -1,33 +1,33 @@ -[[get-geoip-database-api]] -=== Get geoip database configuration API +[[get-ip-location-database-api]] +=== Get IP geolocation database configuration API ++++ -Get geoip database configuration +Get IP geolocation database configuration ++++ -Returns information about one or more geoip database configurations. +Returns information about one or more IP geolocation database configurations. [source,console] ---- -GET /_ingest/geoip/database/my-database-id +GET /_ingest/ip_location/database/my-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] -[[get-geoip-database-api-request]] +[[get-ip-location-database-api-request]] ==== {api-request-title} -`GET /_ingest/geoip/database/` +`GET /_ingest/ip_location/database/` -`GET /_ingest/geoip/database` +`GET /_ingest/ip_location/database` -[[get-geoip-database-api-prereqs]] +[[get-ip-location-database-api-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have the `manage` <> to use this API. -[[get-geoip-database-api-path-params]] +[[get-ip-location-database-api-path-params]] ==== {api-path-parms-title} ``:: @@ -38,22 +38,22 @@ supported. To get all database configurations, omit this parameter or use `*`. -[[get-geoip-database-api-query-params]] +[[get-ip-location-database-api-query-params]] ==== {api-query-parms-title} include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] -[[get-geoip-database-api-example]] +[[get-ip-location-database-api-example]] ==== {api-examples-title} -[[get-geoip-database-api-specific-ex]] -===== Get information for a specific geoip database configuration +[[get-ip-location-database-api-specific-ex]] +===== Get information for a specific IP geolocation database configuration [source,console] ---- -GET /_ingest/geoip/database/my-database-id +GET /_ingest/ip_location/database/my-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] 
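Since the path parameter is optional, the same endpoint can also list every configuration at once; a minimal sketch, per the path-parameter note above:

[source,console]
----
GET /_ingest/ip_location/database <1>
----
<1> Omitting the database ID (or passing `*`) returns all IP geolocation database configurations.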
diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index e068f99ea0ad3..35adc47821978 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -25,16 +25,14 @@ Use the following APIs to get statistics about ingest processing: the <>. [discrete] -[[ingest-geoip-database-apis]] -=== Ingest GeoIP Database APIs - -preview::["The commercial IP geolocation database download management APIs are in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] +[[ingest-ip-location-database-apis]] +=== Ingest IP Location Database APIs Use the following APIs to configure and manage commercial IP geolocation database downloads: -* <> to create or update a database configuration -* <> to retrieve a database configuration -* <> to delete a database configuration +* <> to create or update a database configuration +* <> to retrieve a database configuration +* <> to delete a database configuration include::put-pipeline.asciidoc[] include::get-pipeline.asciidoc[] @@ -42,6 +40,6 @@ include::delete-pipeline.asciidoc[] include::simulate-pipeline.asciidoc[] include::simulate-ingest.asciidoc[] include::geoip-stats.asciidoc[] -include::put-geoip-database.asciidoc[] -include::get-geoip-database.asciidoc[] -include::delete-geoip-database.asciidoc[] +include::put-ip-location-database.asciidoc[] +include::get-ip-location-database.asciidoc[] +include::delete-ip-location-database.asciidoc[] diff --git a/docs/reference/ingest/apis/put-geoip-database.asciidoc b/docs/reference/ingest/apis/put-geoip-database.asciidoc deleted file mode 100644 index 311c303002387..0000000000000 --- a/docs/reference/ingest/apis/put-geoip-database.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -[[put-geoip-database-api]] -=== Create or update geoip database configuration API -++++ -Create or update geoip database configuration -++++ - -Creates or updates an IP geolocation database configuration. - -IMPORTANT: The Maxmind `account_id` shown below requires a license key. Because the license key is sensitive information, -it is stored as a <> in {es} named `ingest.geoip.downloader.maxmind.license_key`. Only -one Maxmind license key is currently allowed per {es} cluster. A valid license key must be in the secure settings in order -to download from Maxmind. The license key setting does not take effect until all nodes are restarted. - -[source,console] ----- -PUT _ingest/geoip/database/my-database-id -{ - "name": "GeoIP2-Domain", - "maxmind": { - "account_id": "1025402" - } -} ----- -// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] - -[[put-geoip-database-api-request]] -==== {api-request-title} - -`PUT /_ingest/geoip/database/` - -[[put-geoip-database-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the -`manage` <> to use this API. - - -[[put-geoip-database-api-path-params]] -==== {api-path-parms-title} - -``:: -+ -__ -(Required, string) ID of the database configuration to create or update. 
- -[[put-geoip-database-api-query-params]] -==== {api-query-parms-title} - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] - -[[put-geoip-database-api-request-body]] -==== {api-request-body-title} - -// tag::geoip-database-object[] -`name`:: -(Required, string) -The provider-assigned name of the IP geolocation database to download. - -``:: -(Required, a provider object and its associated configuration) -The configuration necessary to identify which IP geolocation provider to use to download -the database, as well as any provider-specific configuration necessary for such downloading. -+ -At present, the only supported provider is `maxmind`, and the maxmind provider -requires that an `account_id` (string) is configured. -// end::geoip-database-object[] - -[[geoip-database-configuration-licensing]] -==== Licensing - -Downloading databases from third party providers is a commercial feature that requires an -appropriate license. For more information, refer to https://www.elastic.co/subscriptions. diff --git a/docs/reference/ingest/apis/put-ip-location-database.asciidoc b/docs/reference/ingest/apis/put-ip-location-database.asciidoc new file mode 100644 index 0000000000000..e42d84752694c --- /dev/null +++ b/docs/reference/ingest/apis/put-ip-location-database.asciidoc @@ -0,0 +1,92 @@ +[[put-ip-location-database-api]] +=== Create or update IP geolocation database configuration API +++++ +Create or update IP geolocation database configuration +++++ + +Creates or updates an IP geolocation database configuration. + +IMPORTANT: The Maxmind `account_id` shown below requires a license key. Because the license key is sensitive information, +it is stored as a <> in {es} named `ingest.geoip.downloader.maxmind.license_key`. Only +one Maxmind license key is currently allowed per {es} cluster. A valid license key must be in the secure settings in order +to download from Maxmind. The license key setting does not take effect until all nodes are restarted or a +<> request is executed. + +[source,console] +---- +PUT _ingest/ip_location/database/my-database-1 +{ + "name": "GeoIP2-Domain", + "maxmind": { + "account_id": "1234567" + } +} +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] + +IMPORTANT: The IPinfo configuration shown below requires a token. Because the token is sensitive information, +it is stored as a <> in {es} named `ingest.ip_location.downloader.ipinfo.token`. Only +one IPinfo token is currently allowed per {es} cluster. A valid token must be in the secure settings in order +to download from IPinfo. The token setting does not take effect until all nodes are restarted or a +<> request is executed. + +[source,console] +---- +PUT _ingest/ip_location/database/my-database-2 +{ + "name": "standard_location", + "ipinfo": { + } +} +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] + + +[[put-ip-location-database-api-request]] +==== {api-request-title} + +`PUT /_ingest/ip_location/database/` + +[[put-ip-location-database-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the +`manage` <> to use this API. + + +[[put-ip-location-database-api-path-params]] +==== {api-path-parms-title} + +``:: ++ +__ +(Required, string) ID of the database configuration to create or update. 
+ +[[put-ip-location-database-api-query-params]] +==== {api-query-parms-title} + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +[[put-ip-location-database-api-request-body]] +==== {api-request-body-title} + +// tag::ip-location-database-object[] +`name`:: +(Required, string) +The provider-assigned name of the IP geolocation database to download. + +``:: +(Required, a provider object and its associated configuration) +The configuration necessary to identify which IP geolocation provider to use to download +the database, as well as any provider-specific configuration necessary for such downloading. ++ +At present, the only supported providers are `maxmind` and `ipinfo`. The maxmind provider +requires that an `account_id` (string) is configured. The ipinfo provider does not require +additional configuration in the request body. +// end::ip-location-database-object[] + +[[ip-location-database-configuration-licensing]] +==== Licensing + +Downloading databases from third party providers is a commercial feature that requires an +appropriate license. For more information, refer to https://www.elastic.co/subscriptions.
diff --git a/docs/reference/ingest/processors.asciidoc b/docs/reference/ingest/processors.asciidoc index 8f7cef06d12a0..f4fcc0fc84d0d 100644 --- a/docs/reference/ingest/processors.asciidoc +++ b/docs/reference/ingest/processors.asciidoc @@ -77,7 +77,10 @@ Computes a hash of the document’s content. Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. <>:: -Adds information about the geographical location of an IPv4 or IPv6 address. +Adds information about the geographical location of an IPv4 or IPv6 address from a MaxMind database. + +<>:: +Adds information about the geographical location of an IPv4 or IPv6 address from an IP geolocation database. <>:: Calculates the network direction given a source IP address, destination IP address, and a list of internal networks. @@ -245,6 +248,7 @@ include::processors/grok.asciidoc[] include::processors/gsub.asciidoc[] include::processors/html_strip.asciidoc[] include::processors/inference.asciidoc[] +include::processors/ip-location.asciidoc[] include::processors/join.asciidoc[] include::processors/json.asciidoc[] include::processors/kv.asciidoc[]
diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 3a9ba58dedbf0..78ebe3f5b5ee3 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -13,7 +13,7 @@ ASN IP geolocation databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[ CC BY-SA 4.0 license. It automatically downloads these databases if your nodes can connect to `storage.googleapis.com` domain and either: * `ingest.geoip.downloader.eager.download` is set to true -* your cluster has at least one pipeline with a `geoip` processor +* your cluster has at least one pipeline with a `geoip` or `ip_location` processor {es} automatically downloads updates for these databases from the Elastic GeoIP endpoint: https://geoip.elastic.co/v1/database?elastic_geoip_service_tos=agree[https://geoip.elastic.co/v1/database]. To get download statistics for these updates, use the <>. @@ -25,10 +25,10 @@ If your cluster can't connect to the Elastic GeoIP endpoint or you want to manage your own updates, see <>. If you would like to have {es} download database files directly from Maxmind using your own provided -license key, see <>. +license key, see <>. If {es} can't connect to the endpoint for 30 days all updated databases will become -invalid.
{es} will stop enriching documents with geoip data and will add `tags: ["_geoip_expired_database"]` +invalid. {es} will stop enriching documents with IP geolocation data and will add a `tags: ["_geoip_expired_database"]` field instead. [[using-ingest-geoip]] ==== Using the `geoip` Processor in a Pipeline [[ingest-geoip-options]] .`geoip` options [options="header"] |====== | Name | Required | Default | Description | `field` | yes | - | The field to get the IP address from for the geographical lookup. -| `target_field` | no | geoip | The field that will hold the geographical information looked up from the MaxMind database. -| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). -| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the geoip lookup. +| `target_field` | no | geoip | The field that will hold the geographical information looked up from the database. +| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). +| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the IP geolocation lookup. | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document -| `first_only` | no | `true` | If `true` only first found geoip data will be returned, even if `field` contains array +| `first_only` | no | `true` | If `true`, only the first found IP geolocation data will be returned, even if `field` contains an array | `download_database_on_pipeline_creation` | no | `true` | If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. Else, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. |====== *Depends on what is available in `database_file`: * If a GeoLite2 City or GeoIP2 City database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, -and `location`. The fields actually added depend on what has been found and which properties were configured in `properties`. +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, and `accuracy_radius`. The fields actually added depend on what has been found and which properties were configured in `properties`.
* If a GeoLite2 Country or GeoIP2 Country database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_code`, and `continent_name`. The fields actually added depend on what has been found +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, and `continent_name`. The fields actually added depend on what has been found and which properties were configured in `properties`. * If the GeoLite2 ASN database is used, then the following fields may be added under the `target_field`: `ip`, `asn`, `organization_name` and `network`. The fields actually added depend on what has been found and which properties were configured @@ -70,12 +72,12 @@ The fields actually added depend on what has been found and which properties wer `organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`. The fields actually added depend on what has been found and which properties were configured in `properties`. * If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, -`location`, `asn`, `organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, `accuracy_radius`, `country_confidence`, `city_confidence`, `postal_confidence`, `asn`, `organization_name`, `network`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, `residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and -`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. - -preview::["Do not use the GeoIP2 Anonymous IP, GeoIP2 Connection Type, GeoIP2 Domain, GeoIP2 ISP, and GeoIP2 Enterprise databases in production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. Here is an example that uses the default city database and adds the geographical information to the `geoip` field based on the `ip` field: @@ -83,7 +85,7 @@ Here is an example that uses the default city database and adds the geographical -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -134,7 +136,7 @@ this database is downloaded automatically. 
So this: -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -186,7 +188,7 @@ cannot be found: -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -252,7 +254,7 @@ PUT my_ip_locations -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -425,7 +427,7 @@ The `geoip` processor supports the following setting: The maximum number of results that should be cached. Defaults to `1000`. -Note that these settings are node settings and apply to all `geoip` processors, i.e. there is one cache for all defined `geoip` processors. +Note that these settings are node settings and apply to all `geoip` and `ip_location` processors, i.e. there is a single cache for all such processors. [[geoip-cluster-settings]] ===== Cluster settings @@ -454,7 +456,7 @@ each node's <> at `$ES_TMPDIR/geoip-databases/<>`.
diff --git a/docs/reference/ingest/processors/ip-location.asciidoc b/docs/reference/ingest/processors/ip-location.asciidoc new file mode 100644 --- /dev/null +++ b/docs/reference/ingest/processors/ip-location.asciidoc +++++ +IP Location +++++ + +The `ip_location` processor adds information about the geographical location of an +IPv4 or IPv6 address. + +[[ip-location-automatic-updates]] +By default, the processor uses the GeoLite2 City, GeoLite2 Country, and GeoLite2 +ASN IP geolocation databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the +CC BY-SA 4.0 license. It automatically downloads these databases if your nodes can connect to the `storage.googleapis.com` domain and either: + +* `ingest.geoip.downloader.eager.download` is set to true +* your cluster has at least one pipeline with a `geoip` or `ip_location` processor + +{es} automatically downloads updates for these databases from the Elastic GeoIP +endpoint: +https://geoip.elastic.co/v1/database?elastic_geoip_service_tos=agree[https://geoip.elastic.co/v1/database]. +To get download statistics for these updates, use the <>. + +If your cluster can't connect to the Elastic GeoIP endpoint or you want to +manage your own updates, see <>. + +If you would like to have {es} download database files directly from Maxmind using your own provided +license key, see <>. + +If {es} can't connect to the endpoint for 30 days all updated databases will become +invalid. {es} will stop enriching documents with IP geolocation data and will add a `tags: ["_ip_location_expired_database"]` +field instead. + +[[using-ingest-ip-location]] +==== Using the `ip_location` Processor in a Pipeline + +[[ingest-ip-location-options]] +.`ip_location` options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to get the IP address from for the geographical lookup. +| `target_field` | no | ip_location | The field that will hold the geographical information looked up from the database. +| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended).
+| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the IP geolocation lookup. +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +| `first_only` | no | `true` | If `true`, only the first found IP geolocation data will be returned, even if `field` contains an array +| `download_database_on_pipeline_creation` | no | `true` | If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. Else, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. +|====== + +*Depends on what is available in `database_file`: + +* If a GeoLite2 City or GeoIP2 City database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, and `accuracy_radius`. The fields actually added depend on what has been found and which properties were configured in `properties`. +* If a GeoLite2 Country or GeoIP2 Country database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, and `continent_name`. The fields actually added depend on what has been found +and which properties were configured in `properties`. +* If the GeoLite2 ASN database is used, then the following fields may be added under the `target_field`: `ip`, +`asn`, `organization_name` and `network`. The fields actually added depend on what has been found and which properties were configured +in `properties`. +* If the GeoIP2 Anonymous IP database is used, then the following fields may be added under the `target_field`: `ip`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, and `residential_proxy`. The fields actually added +depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Connection Type database is used, then the following fields may be added under the `target_field`: `ip`, and +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Domain database is used, then the following fields may be added under the `target_field`: `ip`, and `domain`. +The fields actually added depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 ISP database is used, then the following fields may be added under the `target_field`: `ip`, `asn`, +`organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`. The fields actually added +depend on what has been found and which properties were configured in `properties`.
+* If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, `accuracy_radius`, `country_confidence`, `city_confidence`, `postal_confidence`, `asn`, `organization_name`, `network`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, +`residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. + +Here is an example that uses the default city database and adds the geographical information to the `ip_location` field based on the `ip` field: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip" + } + } + ] +} +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "89.160.20.128" +} +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +Which returns: + +[source,console-result] +-------------------------------------------------- +{ + "found": true, + "_index": "my-index-000001", + "_id": "my_id", + "_version": 1, + "_seq_no": 55, + "_primary_term": 1, + "_source": { + "ip": "89.160.20.128", + "ip_location": { + "continent_name": "Europe", + "country_name": "Sweden", + "country_iso_code": "SE", + "city_name" : "Linköping", + "region_iso_code" : "SE-E", + "region_name" : "Östergötland County", + "location": { "lat": 58.4167, "lon": 15.6167 } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term":1/"_primary_term" : $body._primary_term/] + +Here is an example that uses the default country database and adds the +geographical information to the `geo` field based on the `ip` field. Note that +this database is downloaded automatically. So this: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip", + "target_field" : "geo", + "database_file" : "GeoLite2-Country.mmdb" + } + } + ] +} +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "89.160.20.128" +} +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +returns this: + +[source,console-result] +-------------------------------------------------- +{ + "found": true, + "_index": "my-index-000001", + "_id": "my_id", + "_version": 1, + "_seq_no": 65, + "_primary_term": 1, + "_source": { + "ip": "89.160.20.128", + "geo": { + "continent_name": "Europe", + "country_name": "Sweden", + "country_iso_code": "SE" + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] + + +Not all IP addresses find geo information in the database. When this +occurs, no `target_field` is inserted into the document.
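The options table above also allows `database_file` to reference a <> by its provider-assigned name with the `.mmdb` suffix appended. A hedged sketch, assuming a configured database named `GeoIP2-Domain` like the one in the create-configuration example of this change:

[source,console]
----
PUT _ingest/pipeline/ip_location_domain
{
  "description" : "Add ip geolocation info",
  "processors" : [
    {
      "ip_location" : {
        "field" : "ip",
        "database_file" : "GeoIP2-Domain.mmdb"    <1>
      }
    }
  ]
}
----
<1> Assumed filename: the configured database's provider-assigned name with `.mmdb` appended, per the options table above.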
+ +Here is an example of how a document is indexed when no information can be found for "80.231.5.0": + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip" + } + } + ] +} + +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "80.231.5.0" +} + +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +Which returns: + +[source,console-result] +-------------------------------------------------- +{ + "_index" : "my-index-000001", + "_id" : "my_id", + "_version" : 1, + "_seq_no" : 71, + "_primary_term": 1, + "found" : true, + "_source" : { + "ip" : "80.231.5.0" + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index 44f90eded8632..4c16f260c13e7 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -117,12 +117,10 @@ that sacrifices result accuracy for improved speed. The `dense_vector` type supports quantization to reduce the memory footprint required when <> `float` vectors. The three following quantization strategies are supported: -+ --- -`int8` - Quantizes each dimension of the vector to 1-byte integers. This reduces the memory footprint by 75% (or 4x) at the cost of some accuracy. -`int4` - Quantizes each dimension of the vector to half-byte integers. This reduces the memory footprint by 87% (or 8x) at the cost of accuracy. -`bbq` - experimental:[] Better binary quantization which reduces each dimension to a single bit precision. This reduces the memory footprint by 96% (or 32x) at a larger cost of accuracy. Generally, oversampling during query time and reranking can help mitigate the accuracy loss. --- +* `int8` - Quantizes each dimension of the vector to 1-byte integers. This reduces the memory footprint by 75% (or 4x) at the cost of some accuracy. +* `int4` - Quantizes each dimension of the vector to half-byte integers. This reduces the memory footprint by 87% (or 8x) at the cost of accuracy. +* `bbq` - experimental:[] Better binary quantization which reduces each dimension to a single bit of precision. This reduces the memory footprint by 96% (or 32x) at a larger cost of accuracy. Generally, oversampling during query time and reranking can help mitigate the accuracy loss. + When using a quantized format, you may want to oversample and rescore the results to improve accuracy. See <> for more information. @@ -245,12 +243,11 @@ their vector field's similarity to the query vector. The `_score` of each document will be derived from the similarity, in a way that ensures scores are positive and that a larger score corresponds to a higher ranking. Defaults to `l2_norm` when `element_type: bit` otherwise defaults to `cosine`. - -NOTE: `bit` vectors only support `l2_norm` as their similarity metric. - + ^*^ This parameter can only be specified when `index` is `true`. + +NOTE: `bit` vectors only support `l2_norm` as their similarity metric.
+ .Valid values for `similarity` [%collapsible%open] ====
diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index ac23c153e01a3..684ad7c369e7d 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -87,7 +87,7 @@ Trying to <> that is used on a [discrete] [[auto-text-chunking]] -==== Automatic text chunking +==== Text chunking {infer-cap} endpoints have a limit on the amount of text they can process. To allow for large amounts of text to be used in semantic search, `semantic_text` automatically generates smaller passages if needed, called _chunks_. @@ -95,8 +95,7 @@ To allow for large amounts of text to be used in semantic search, `semantic_text Each chunk will include the text subpassage and the corresponding embedding generated from it. When querying, the individual passages will be automatically searched for each document, and the most relevant passage will be used to compute a score. -Documents are split into 250-word sections with a 100-word overlap so that each section shares 100 words with the previous section. -This overlap ensures continuity and prevents vital contextual information in the input text from being lost by a hard break. +For more details on chunking and how to configure chunking settings, see <> in the Inference API documentation. [discrete]
diff --git a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc index 6832934f6985c..304b8c7745007 100644 --- a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc +++ b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc @@ -124,7 +124,7 @@ PUT _query_rules/ruleset-3 }, { "rule_id": "rule-3", - "type": "pinned", + "type": "exclude", "criteria": [ { "type": "fuzzy", @@ -178,6 +178,9 @@ A sample response: "rule_total_count": 1, "rule_criteria_types_counts": { "exact": 1 + }, + "rule_type_counts": { + "pinned": 1 } }, { @@ -186,6 +189,9 @@ A sample response: "rule_criteria_types_counts": { "exact": 1, "fuzzy": 1 + }, + "rule_type_counts": { + "pinned": 2 } }, { @@ -194,6 +200,10 @@ A sample response: "rule_criteria_types_counts": { "exact": 1, "fuzzy": 2 + }, + "rule_type_counts": { + "pinned": 2, + "exclude": 1 } } ]
diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index e0568f500f268..506dff7891ad2 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1926,4 +1926,19 @@ Refer to <>. [role="exclude",id="remote-clusters-privileges"] === Configure roles and users for remote clusters -Refer to <>. \ No newline at end of file +Refer to <>. + +[role="exclude",id="put-geoip-database-api"] +=== Create or update geoip database configuration API + +Refer to <>. + +[role="exclude",id="get-geoip-database-api"] +=== Get geoip database configuration API + +Refer to <>. + +[role="exclude",id="delete-geoip-database-api"] +=== Delete geoip database configuration API + +Refer to <>.
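The `semantic_text` change above defers chunking configuration to the {infer} API. As a hedged sketch of what that configuration looks like there (the endpoint name and values are illustrative, and the `chunking_settings` object is assumed to take a `strategy` plus size and overlap parameters, as the linked chunking docs describe):

[source,console]
----
PUT _inference/sparse_embedding/my-chunking-endpoint
{
  "service": "elasticsearch",
  "service_settings": {
    "model_id": ".elser_model_2",
    "num_allocations": 1,
    "num_threads": 1
  },
  "chunking_settings": {
    "strategy": "sentence",    <1>
    "max_chunk_size": 250,
    "sentence_overlap": 1
  }
}
----
<1> Assumed parameters; `word` and `sentence` strategies with size and overlap settings are what the inference chunking documentation describes.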
diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index a198f49383907..37f49f2445770 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -1,7 +1,6 @@ [role="xpack"] [[security-api-bulk-put-role]] === Bulk create or update roles API -preview::[] ++++ Bulk create or update roles API ++++ @@ -103,7 +102,9 @@ They have no effect for remote clusters configured with the <> can be used to determine +which privileges are allowed per version. For more information, see <>.
diff --git a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc index a782b5e37fcb9..b9978c89bef3a 100644 --- a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc @@ -1,7 +1,6 @@ [role="xpack"] [[security-api-bulk-delete-role]] === Bulk delete roles API -preview::[] ++++ Bulk delete roles API ++++
diff --git a/docs/reference/rest-api/security/create-roles.asciidoc b/docs/reference/rest-api/security/create-roles.asciidoc index a1ab892330e67..d23b9f06e2d87 100644 --- a/docs/reference/rest-api/security/create-roles.asciidoc +++ b/docs/reference/rest-api/security/create-roles.asciidoc @@ -105,7 +105,9 @@ They have no effect for remote clusters configured with the <> can be used to determine +which privileges are allowed per version. For more information, see <>. @@ -176,21 +178,29 @@ POST /_security/role/cli_or_drivers_minimal -------------------------------------------------- // end::sql-queries-permission[] -The following example configures a role with remote indices privileges on a remote cluster: +The following example configures a role with remote indices and remote cluster privileges for a remote cluster: [source,console] -------------------------------------------------- -POST /_security/role/role_with_remote_indices +POST /_security/role/only_remote_access_role { "remote_indices": [ { - "clusters": [ "my_remote" ], <1> + "clusters": ["my_remote"], <1> "names": ["logs*"], <2> "privileges": ["read", "read_cross_cluster", "view_index_metadata"] <3> } + ], + "remote_cluster": [ + { + "clusters": ["my_remote"], <1> + "privileges": ["monitor_stats"] <4> + } ] } -------------------------------------------------- -<1> The remote indices privileges apply to remote cluster with the alias `my_remote`. -<2> Privileges are granted for indices matching pattern `logs*` on the remote cluster ( `my_remote`). +<1> The remote indices and remote cluster privileges apply to the remote cluster with the alias `my_remote`. +<2> Privileges are granted for indices matching pattern `logs*` on the remote cluster (`my_remote`). <3> The actual <> granted for `logs*` on `my_remote`. +<4> The actual <> granted for `my_remote`. +Note that only a subset of the cluster privileges is supported for remote clusters.
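Since this change also removes the technical-preview banner from the bulk roles APIs, the same role can presumably be created through the bulk endpoint as well; a sketch, assuming the bulk request wraps role definitions in a top-level `roles` object keyed by role name:

[source,console]
----
POST /_security/role
{
  "roles": {
    "only_remote_access_role": {    <1>
      "remote_indices": [
        {
          "clusters": ["my_remote"],
          "names": ["logs*"],
          "privileges": ["read", "read_cross_cluster", "view_index_metadata"]
        }
      ],
      "remote_cluster": [
        {
          "clusters": ["my_remote"],
          "privileges": ["monitor_stats"]
        }
      ]
    }
  }
}
----
<1> The role body mirrors the single-role example above; only the wrapping `roles` object is assumed.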
diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index 0da75ac30d2dd..86a81f1d155d2 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -704,5 +704,3 @@ Instead they are only allowed as elements of specific retrievers: * <> * <> * <> -* <> -
diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc index a38971a0bae6a..8b0b3dc57686e 100644 --- a/docs/reference/searchable-snapshots/index.asciidoc +++ b/docs/reference/searchable-snapshots/index.asciidoc @@ -176,10 +176,10 @@ nodes that have a shared cache. ==== Manually mounting snapshots captured by an Index Lifecycle Management ({ilm-init}) policy can interfere with {ilm-init}'s automatic management. This may lead to issues such as data loss -or complications with snapshot handling. +or complications with snapshot handling. For optimal results, allow {ilm-init} to manage -snapshots automatically. +snapshots automatically. <>. ==== @@ -293,6 +293,14 @@ repository. If you wish to search data across multiple regions, configure multiple clusters and use <> or <> instead of {search-snaps}. +If a searchable snapshot index has no replicas, then when the node +hosting it is shut down, allocation will immediately try to relocate the index to a new node +in order to maximize availability. For fully mounted indices, this will result in the new node +downloading the entire index snapshot from the cloud repository. Under a rolling cluster restart, +this may happen multiple times for each searchable snapshot index. Temporarily +disabling allocation during a planned node restart will prevent this, as described in +the <>. + [discrete] [[back-up-restore-searchable-snapshots]] === Back up and restore {search-snaps}
diff --git a/docs/reference/security/authorization/managing-roles.asciidoc b/docs/reference/security/authorization/managing-roles.asciidoc index 535d70cbc5e9c..0c3f520605f07 100644 --- a/docs/reference/security/authorization/managing-roles.asciidoc +++ b/docs/reference/security/authorization/managing-roles.asciidoc @@ -249,12 +249,10 @@ The following describes the structure of a remote cluster permissions entry: <> and <>. This field is required. <2> The cluster level privileges for the remote cluster. The allowed values here are a subset of the -<>. This field is required. +<>. +The <> can be used to determine +which privileges are allowed here. This field is required. -The `monitor_enrich` privilege for remote clusters was introduced in version -8.15.0. Currently, this is the only privilege available for remote clusters and -is required to enable users to use the `ENRICH` keyword in ES|QL queries across -clusters. ==== Example
diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index 747b1eef40441..3b69e5c1ba984 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -250,6 +250,11 @@ Privileges to list and view details on existing repositories and snapshots. + This privilege is not available in {serverless-full}. +`monitor_stats`:: +Privileges to list and view details of stats. ++ +This privilege is not available in {serverless-full}. + `monitor_text_structure`:: All read-only operations related to the <>.
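For the searchable-snapshot note added above, temporarily disabling allocation during a planned restart is the standard rolling-restart step that the note links to; a sketch (re-enable afterwards by setting the value back to `null`):

[source,console]
----
PUT _cluster/settings
{
  "persistent": {
    "cluster.routing.allocation.enable": "primaries"    <1>
  }
}
----
<1> Prevents shard relocation, including re-downloading fully mounted searchable snapshot indices, while a node restarts; set the value back to `null` once the node rejoins the cluster.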
+
diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index 145112ef4d27c..c375ddf076a66 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -118,6 +118,17 @@ If you configure multiple email accounts, you must either configure this setting or specify the email account to use in the <> action. See <>. +`xpack.notification.email.recipient_allowlist`:: +(<>) +Specifies addresses to which emails are allowed to be sent. +Emails with recipients (`To:`, `Cc:`, or `Bcc:`) outside of these patterns will be rejected and an +error thrown. This setting defaults to `["*"]`, which means all recipients are allowed. +Simple globbing is supported, such as `list-*@company.com` in the list of allowed recipients. + +NOTE: This setting can't be used at the same time as `xpack.notification.email.account.domain_allowlist`; +an error will be thrown if both are set. This setting can also be used to allow entire domains, +using a wildcard pattern such as `*@company.com`. + `xpack.notification.email.account`:: Specifies account information for sending notifications via email. You can specify the following email account attributes: @@ -129,6 +140,10 @@ Specifies domains to which emails are allowed to be sent. Emails with recipients `Bcc:`) outside of these domains will be rejected and an error thrown. This setting defaults to `["*"]` which means all domains are allowed. Simple globbing is supported, such as `*.company.com` in the list of allowed domains. + +NOTE: This setting can't be used at the same time as `xpack.notification.email.recipient_allowlist`; +an error will be thrown if both are set. + -- [[email-account-attributes]]
diff --git a/docs/reference/snapshot-restore/repository-azure.asciidoc b/docs/reference/snapshot-restore/repository-azure.asciidoc index 0e6e1478cfc55..50dc42ac9163d 100644 --- a/docs/reference/snapshot-restore/repository-azure.asciidoc +++ b/docs/reference/snapshot-restore/repository-azure.asciidoc @@ -181,7 +181,7 @@ is running. When running {es} in https://azure.microsoft.com/en-gb/products/kubernetes-service[Azure Kubernetes -Service], for instance using {eck-ref}[{eck}], you should use +Service], for instance using {eck-ref}/k8s-snapshots.html#k8s-azure-workload-identity[{eck}], you should use https://azure.github.io/azure-workload-identity/docs/introduction.html[Azure Workload Identity] to provide credentials to {es}.
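As a sketch of the new recipient allowlist described above (hedged: whether the setting is dynamically updatable is not visible in this diff; if it is not, set it in `elasticsearch.yml` instead of through the cluster settings API):

[source,console]
----
PUT _cluster/settings
{
  "persistent": {
    "xpack.notification.email.recipient_allowlist": [
      "list-*@company.com",    <1>
      "*@company.com"
    ]
  }
}
----
<1> Globbing patterns taken from the setting's description above; any recipient outside these patterns would be rejected.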
To use Azure Workload Identity, mount the `azure-identity-token` volume as a subdirectory of the
diff --git a/gradle.properties b/gradle.properties index 745fb4f9e51ae..aa38a61ab0057 100644 --- a/gradle.properties +++ b/gradle.properties @@ -2,7 +2,7 @@ org.gradle.welcome=never org.gradle.warning.mode=none org.gradle.parallel=true # We need to declare --add-exports to make spotless working seamlessly with jdk16 -org.gradle.jvmargs=-XX:+HeapDumpOnOutOfMemoryError -Xss2m --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED +org.gradle.jvmargs=-XX:+HeapDumpOnOutOfMemoryError -Xss2m --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED --add-opens java.base/java.time=ALL-UNNAMED # Enforce the build to fail on deprecated gradle api usage systemProp.org.gradle.warning.mode=fail
diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index d11c4b7fd9c91..e3148c6f3ef2e 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -39,7 +39,7 @@ maven-model = "org.apache.maven:maven-model:3.6.2" mockito-core = "org.mockito:mockito-core:1.9.5" nebula-info = "com.netflix.nebula:gradle-info-plugin:11.3.3" reflections = "org.reflections:reflections:0.9.12" -shadow-plugin = "com.github.breskeby:shadow:3b035f2" +shadow-plugin = "com.gradleup.shadow:shadow-gradle-plugin:8.3.5" snakeyaml = { group = "org.yaml", name = "snakeyaml", version = { strictly = "2.0" } } spock-core = { group = "org.spockframework", name="spock-core", version.ref="spock" } spock-junit4 = { group = "org.spockframework", name="spock-junit4", version.ref="spock" }
diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 7c1e11f390f04..5e874b52fc4c6 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml
[This diff adds and updates dependency-verification entries (component coordinates with sha256 checksums) across many hunks; the XML markup was lost in extraction and only empty hunk offsets survived, so the individual entries are not reproduced here.]
diff --git a/libs/entitlement/bridge/build.gradle b/libs/entitlement/bridge/build.gradle index dff5fac1e1c1f..3d59dd3eaf33e 100644 --- a/libs/entitlement/bridge/build.gradle +++ b/libs/entitlement/bridge/build.gradle @@ -9,8 +9,17 @@ apply plugin: 'elasticsearch.build' +configurations { + bridgeJar { + canBeConsumed = true + canBeResolved = false + } +} + +artifacts { + bridgeJar(jar) +} tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } -
diff --git a/libs/simdvec/build.gradle b/libs/simdvec/build.gradle index 02f960130e690..ffc50ecb1f6ff 100644 --- a/libs/simdvec/build.gradle +++ b/libs/simdvec/build.gradle @@ -33,7 +33,7 @@ tasks.matching { it.name == "compileMain21Java" }.configureEach { } tasks.named('test').configure { - if (BuildParams.getRuntimeJavaVersion().majorVersion.toInteger() >= 21) { + if (buildParams.getRuntimeJavaVersion().map{ it.majorVersion.toInteger() }.get() >= 21) { jvmArgs '--add-modules=jdk.incubator.vector' } }
diff --git a/modules/aggregations/build.gradle b/modules/aggregations/build.gradle index a1ab6363166cb..5df0a890af753 100644 --- a/modules/aggregations/build.gradle +++ b/modules/aggregations/build.gradle @@ -28,7 +28,7 @@ restResources { } } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' }
diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java index c283f9fd93957..c4cdacd135cb4 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java @@ -13,7 +13,7 @@ import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -68,7 +68,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Map<String, Object> getKey() { - return TimeSeriesIdFieldMapper.decodeTsidAsMap(key); + return RoutingPathFields.decodeAsMap(key); } @Override
diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java index a2fa617ed902b..c74637330dd7a 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.RoutingPathFields; import
org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -161,11 +162,11 @@ public void collect(int doc, long bucket) throws IOException { if (currentTsidOrd == aggCtx.getTsidHashOrd()) { tsid = currentTsid; } else { - TimeSeriesIdFieldMapper.TimeSeriesIdBuilder tsidBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + RoutingPathFields routingPathFields = new RoutingPathFields(null); for (TsidConsumer consumer : dimensionConsumers.values()) { - consumer.accept(doc, tsidBuilder); + consumer.accept(doc, routingPathFields); } - currentTsid = tsid = tsidBuilder.buildLegacyTsid().toBytesRef(); + currentTsid = tsid = TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); } long bucketOrdinal = bucketOrds.add(bucket, tsid); if (bucketOrdinal < 0) { // already seen @@ -189,6 +190,6 @@ InternalTimeSeries buildResult(InternalTimeSeries.InternalBucket[] topBuckets) { @FunctionalInterface interface TsidConsumer { - void accept(int docId, TimeSeriesIdFieldMapper.TimeSeriesIdBuilder tsidBuilder) throws IOException; + void accept(int docId, RoutingPathFields routingFields) throws IOException; } }
diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java index be841da07ada9..e61c02e0b9cd2 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.aggregations.AggregationReduceContext; @@ -42,12 +43,12 @@ private List<InternalBucket> randomBuckets(boolean keyed, InternalAggregations a List<Map<String, Object>> keys = randomKeys(bucketKeys(randomIntBetween(1, 4)), numberOfBuckets); for (int j = 0; j < numberOfBuckets; j++) { long docCount = randomLongBetween(0, Long.MAX_VALUE / (20L * numberOfBuckets)); - var builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingPathFields = new RoutingPathFields(null); for (var entry : keys.get(j).entrySet()) { - builder.addString(entry.getKey(), (String) entry.getValue()); + routingPathFields.addString(entry.getKey(), (String) entry.getValue()); } try { - var key = builder.buildLegacyTsid().toBytesRef(); + var key = TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); bucketList.add(new InternalBucket(key, docCount, aggregations, keyed)); } catch (IOException e) { throw new UncheckedIOException(e);
diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java index 26611127a94df..d9a4023457126 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java @@ -30,8 +30,8 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -93,10 +93,10 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens final List<IndexableField> fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); - final TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null); + RoutingPathFields routingPathFields = new RoutingPathFields(null); for (int i = 0; i < dimensions.length; i += 2) { if (dimensions[i + 1] instanceof Number n) { - builder.addLong(dimensions[i].toString(), n.longValue()); + routingPathFields.addLong(dimensions[i].toString(), n.longValue()); if (dimensions[i + 1] instanceof Integer || dimensions[i + 1] instanceof Long) { fields.add(new NumericDocValuesField(dimensions[i].toString(), ((Number) dimensions[i + 1]).longValue())); } else if (dimensions[i + 1] instanceof Float) { @@ -105,7 +105,7 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens fields.add(new DoubleDocValuesField(dimensions[i].toString(), (double) dimensions[i + 1])); } } else { - builder.addString(dimensions[i].toString(), dimensions[i + 1].toString()); + routingPathFields.addString(dimensions[i].toString(), dimensions[i + 1].toString()); fields.add(new SortedSetDocValuesField(dimensions[i].toString(), new BytesRef(dimensions[i + 1].toString()))); } } @@ -118,7 +118,9 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens fields.add(new DoubleDocValuesField(metrics[i].toString(), (double) metrics[i + 1])); } } - fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildLegacyTsid().toBytesRef())); + fields.add( + new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef()) + ); iw.addDocument(fields); }
diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle index d5ce1bfc8d93d..b6fc1e3722ccd 100644 --- a/modules/data-streams/build.gradle +++ b/modules/data-streams/build.gradle @@ -28,14 +28,14 @@ tasks.withType(StandaloneRestIntegTestTask).configureEach { usesDefaultDistribution() } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // These fail in CI but only when run as part of checkPart2 and not individually.
// Tracked in : tasks.named("javaRestTest").configure{enabled = false } tasks.named("yamlRestTest").configure{enabled = false } } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.withType(Test).configureEach { systemProperty 'es.failure_store_feature_flag_enabled', 'true' } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java new file mode 100644 index 0000000000000..2c9b7417b2832 --- /dev/null +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java @@ -0,0 +1,251 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.datastreams; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; +import org.elasticsearch.action.bulk.IncrementalBulkService; +import org.elasticsearch.action.bulk.IndexDocFailureStoreStatus; +import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Strings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class FailureStoreMetricsWithIncrementalBulkIT extends ESIntegTestCase {
+
+    private static final List<String> METRICS = List.of(
+        FailureStoreMetrics.METRIC_TOTAL,
+        FailureStoreMetrics.METRIC_FAILURE_STORE,
+        FailureStoreMetrics.METRIC_REJECTED
+    );
+
+    private static final String DATA_STREAM_NAME = "data-stream-incremental";
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(DataStreamsPlugin.class, TestTelemetryPlugin.class, MapperExtrasPlugin.class);
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal, otherSettings))
+            .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK.getKey(), "512B")
+            .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK_SIZE.getKey(), "2048B")
+            .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK.getKey(), "2KB")
+            .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK_SIZE.getKey(), "1024B")
+            .build();
+    }
+
+    public void testShortCircuitFailure() throws Exception {
+        createDataStreamWithFailureStore();
+
+        String coordinatingOnlyNode = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
+
+        AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+        IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, coordinatingOnlyNode);
+        try (IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest()) {
+
+            AtomicBoolean nextRequested = new AtomicBoolean(true);
+            int successfullyStored = 0;
+            while (nextRequested.get()) {
+                nextRequested.set(false);
+                refCounted.incRef();
+                handler.addItems(List.of(indexRequest(DATA_STREAM_NAME)), refCounted::decRef, () -> nextRequested.set(true));
+                successfullyStored++;
+            }
+            assertBusy(() -> assertTrue(nextRequested.get()));
+            var metrics = collectTelemetry();
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_TOTAL, DATA_STREAM_NAME, successfullyStored);
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_FAILURE_STORE, DATA_STREAM_NAME, 0);
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_REJECTED, DATA_STREAM_NAME, 0);
+
+            // Introduce artificial pressure that will reject the following requests
+            String node = findNodeOfPrimaryShard(DATA_STREAM_NAME);
+            IndexingPressure primaryPressure = internalCluster().getInstance(IndexingPressure.class, node);
+            long memoryLimit = primaryPressure.stats().getMemoryLimit();
+            long primaryRejections = primaryPressure.stats().getPrimaryRejections();
+            try (Releasable ignored = primaryPressure.markPrimaryOperationStarted(10, memoryLimit, false)) {
+                while (primaryPressure.stats().getPrimaryRejections() == primaryRejections) {
+                    while (nextRequested.get()) {
+                        nextRequested.set(false);
+                        refCounted.incRef();
+                        List<DocWriteRequest<?>> requests = new ArrayList<>();
+                        for (int i = 0; i < 20; ++i) {
+                            requests.add(indexRequest(DATA_STREAM_NAME));
+                        }
+                        handler.addItems(requests, refCounted::decRef, () -> nextRequested.set(true));
+                    }
+                    assertBusy(() -> assertTrue(nextRequested.get()));
+                }
+            }
+
+            while (nextRequested.get()) {
+                nextRequested.set(false);
+                refCounted.incRef();
+                handler.addItems(List.of(indexRequest(DATA_STREAM_NAME)), refCounted::decRef, () -> nextRequested.set(true));
+            }
+
+            assertBusy(() -> assertTrue(nextRequested.get()));
+
+            PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
+            handler.lastItems(List.of(indexRequest(DATA_STREAM_NAME)), refCounted::decRef, future);
+
+            BulkResponse bulkResponse = safeGet(future);
+
+            for (int i = 0; i < bulkResponse.getItems().length; ++i) {
+                // the first requests were successful
+                boolean hasFailed = i >= successfullyStored;
+                assertThat(bulkResponse.getItems()[i].isFailed(), is(hasFailed));
+                assertThat(bulkResponse.getItems()[i].getFailureStoreStatus(), is(IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN));
+            }
+
+            metrics = collectTelemetry();
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_TOTAL, DATA_STREAM_NAME, bulkResponse.getItems().length);
+            assertDataStreamMetric(
+                metrics,
+                FailureStoreMetrics.METRIC_REJECTED,
+                DATA_STREAM_NAME,
+                bulkResponse.getItems().length - successfullyStored
+            );
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_FAILURE_STORE, DATA_STREAM_NAME, 0);
+        }
+    }
+
+    private void createDataStreamWithFailureStore() throws IOException {
+        TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(
+            "template-incremental"
+        );
+        request.indexTemplate(
+            ComposableIndexTemplate.builder()
+                .indexPatterns(List.of(DATA_STREAM_NAME + "*"))
+                .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true))
+                .template(new Template(null, new CompressedXContent("""
+                    {
+                      "dynamic": false,
+                      "properties": {
+                        "@timestamp": {
+                          "type": "date"
+                        },
+                        "count": {
+                          "type": "long"
+                        }
+                      }
+                    }"""), null))
+                .build()
+        );
+        assertAcked(safeGet(client().execute(TransportPutComposableIndexTemplateAction.TYPE, request)));
+
+        final var createDataStreamRequest = new CreateDataStreamAction.Request(
+            TEST_REQUEST_TIMEOUT,
+            TEST_REQUEST_TIMEOUT,
+            DATA_STREAM_NAME
+        );
+        assertAcked(safeGet(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest)));
+    }
+
+    private static Map<String, List<Measurement>> collectTelemetry() {
+        Map<String, List<Measurement>> measurements = new HashMap<>();
+        for (PluginsService pluginsService : internalCluster().getInstances(PluginsService.class)) {
+            final TestTelemetryPlugin telemetryPlugin = pluginsService.filterPlugins(TestTelemetryPlugin.class).findFirst().orElseThrow();
+
+            telemetryPlugin.collect();
+
+            for (String metricName : METRICS) {
+                measurements.put(metricName, telemetryPlugin.getLongCounterMeasurement(metricName));
+            }
+        }
+        return measurements;
+    }
+
+    private void assertDataStreamMetric(Map<String, List<Measurement>> metrics, String metric, String dataStreamName, int expectedValue) {
+        List<Measurement> measurements = metrics.get(metric);
+        assertThat(measurements, notNullValue());
+        long totalValue = measurements.stream()
+            .filter(m -> m.attributes().get("data_stream").equals(dataStreamName))
+            .mapToLong(Measurement::getLong)
+            .sum();
+        assertThat(totalValue, equalTo((long) expectedValue));
+    }
+
+    private static IndexRequest indexRequest(String dataStreamName) {
+        String time = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis());
+        String value = "1";
+        return new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE)
+            .source(Strings.format("{\"%s\":\"%s\", \"count\": %s}", DEFAULT_TIMESTAMP_FIELD, time, value), XContentType.JSON);
+    }
+
+    protected static String findNodeOfPrimaryShard(String dataStreamName) {
+        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(
+            TEST_REQUEST_TIMEOUT,
+            new String[] { dataStreamName }
+        );
+        GetDataStreamAction.Response getDataStreamResponse = safeGet(client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest));
+        assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
+        DataStream dataStream = getDataStreamResponse.getDataStreams().getFirst().getDataStream();
+        assertThat(dataStream.getName(), equalTo(DATA_STREAM_NAME));
+        assertThat(dataStream.getIndices().size(), equalTo(1));
+        String backingIndex = dataStream.getIndices().getFirst().getName();
+        assertThat(backingIndex, backingIndexEqualTo(DATA_STREAM_NAME, 1));
+
+        Index index = resolveIndex(backingIndex);
+        int shardId = 0;
+        for (String node : internalCluster().getNodeNames()) {
+            var indicesService = internalCluster().getInstance(IndicesService.class, node);
+            IndexService indexService = indicesService.indexService(index);
+            if (indexService != null) {
+                IndexShard shard = indexService.getShardOrNull(shardId);
+                if (shard != null && shard.isActive() && shard.routingEntry().primary()) {
+                    return node;
+                }
+            }
+        }
+        throw new AssertionError("IndexShard instance not found for shard " + new ShardId(index, shardId));
+    }
+}
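
The test above exercises the chunked bulk-ingest protocol directly rather than through the REST layer. Distilled to its core, the handler contract it relies on looks like this; a minimal sketch using only names that appear in the test, with empty lambdas standing in for the refCounted bookkeeping:

    // Distilled from testShortCircuitFailure above (illustrative only).
    // Intermediate chunks go through addItems(); the third argument is the
    // backpressure callback invoked when the handler is ready for more. The
    // final chunk goes through lastItems(), which also delivers the combined
    // BulkResponse covering every item sent on this handler.
    try (IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest()) {
        handler.addItems(List.of(indexRequest(DATA_STREAM_NAME)), () -> {}, () -> { /* ready for next chunk */ });
        PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
        handler.lastItems(List.of(indexRequest(DATA_STREAM_NAME)), () -> {}, future);
        BulkResponse response = safeGet(future);
    }
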
diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
index de5cf3baa744e..3fbf85ab1e702 100644
--- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
+++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
@@ -244,17 +244,17 @@ TSDB failures go to failure store:
       refresh: true
       body:
         - '{ "create": { "_index": "fs-k8s"} }'
-        - '{"@timestamp":"2021-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
+        - '{"@timestamp":"2021-04-28T01:00:00Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
         - '{ "create": { "_index": "k8s"} }'
-        - '{ "@timestamp": "2021-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
+        - '{ "@timestamp": "2021-04-28T01:00:00Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
         - '{ "create": { "_index": "fs-k8s"} }'
         - '{ "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
         - '{ "create": { "_index": "fs-k8s"} }'
-        - '{ "@timestamp":"2000-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
+        - '{ "@timestamp":"2000-04-28T01:00:00Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
         - '{ "create": { "_index": "k8s"} }'
         - '{"metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
         - '{ "create": { "_index": "k8s"} }'
-        - '{ "@timestamp":"2000-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
"uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "@timestamp":"2000-04-28T01:00:00Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - is_true: errors # Successfully indexed to backing index diff --git a/modules/ingest-attachment/build.gradle b/modules/ingest-attachment/build.gradle index 92e843fa31a63..821de8f834a44 100644 --- a/modules/ingest-attachment/build.gradle +++ b/modules/ingest-attachment/build.gradle @@ -143,7 +143,7 @@ tasks.named("thirdPartyAudit").configure { ignoreMissingClasses() } -if (BuildParams.inFipsJvm) { +if (buildParams.inFipsJvm) { tasks.named("test").configure { enabled = false } tasks.named("yamlRestTest").configure { enabled = false }; tasks.named("yamlRestCompatTest").configure { enabled = false }; diff --git a/modules/ingest-geoip/qa/full-cluster-restart/build.gradle b/modules/ingest-geoip/qa/full-cluster-restart/build.gradle index b51fa497c8492..8e7d20108a869 100644 --- a/modules/ingest-geoip/qa/full-cluster-restart/build.gradle +++ b/modules/ingest-geoip/qa/full-cluster-restart/build.gradle @@ -24,7 +24,7 @@ dependencies { // once we are ready to test migrations from 8.x to 9.x, we can set the compatible version to 8.0.0 // see https://github.com/elastic/elasticsearch/pull/93666 -BuildParams.bwcVersions.withWireCompatible(v -> v.before("9.0.0")) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(v -> v.before("9.0.0")) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/modules/ingest-user-agent/src/main/java/module-info.java b/modules/ingest-user-agent/src/main/java/module-info.java index e17dab83d5754..ef0af652f50b3 100644 --- a/modules/ingest-user-agent/src/main/java/module-info.java +++ b/modules/ingest-user-agent/src/main/java/module-info.java @@ -10,4 +10,5 @@ module org.elasticsearch.ingest.useragent { requires org.elasticsearch.server; requires org.elasticsearch.xcontent; + requires org.elasticsearch.base; } diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java index 6262c26cb752f..4d71417ec982c 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java @@ -9,7 +9,9 @@ package org.elasticsearch.ingest.useragent; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.Processor; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; @@ -23,6 +25,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.UnaryOperator; import java.util.stream.Stream; public class IngestUserAgentPlugin extends Plugin implements IngestPlugin { @@ -97,4 +100,15 @@ static Map createUserAgentParsers(Path userAgentConfigD public List> getSettings() { return List.of(CACHE_SIZE_SETTING); } + + @Override + public Map> getCustomMetadataUpgraders() { + return Map.of( + IngestMetadata.TYPE, + 
diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java
index 6224bb4d502d7..08ec00e0f04cf 100644
--- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java
+++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java
@@ -9,9 +9,8 @@
 
 package org.elasticsearch.ingest.useragent;
 
-import org.elasticsearch.common.logging.DeprecationCategory;
-import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.util.Maps;
+import org.elasticsearch.core.UpdateForV10;
 import org.elasticsearch.ingest.AbstractProcessor;
 import org.elasticsearch.ingest.IngestDocument;
 import org.elasticsearch.ingest.Processor;
@@ -32,8 +31,6 @@
 
 public class UserAgentProcessor extends AbstractProcessor {
 
-    private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(UserAgentProcessor.class);
-
     public static final String TYPE = "user_agent";
 
     private final String field;
@@ -198,21 +195,13 @@ public UserAgentProcessor create(
             String processorTag,
             String description,
             Map<String, Object> config
-        ) throws Exception {
+        ) {
             String field = readStringProperty(TYPE, processorTag, config, "field");
             String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "user_agent");
             String regexFilename = readStringProperty(TYPE, processorTag, config, "regex_file", IngestUserAgentPlugin.DEFAULT_PARSER_NAME);
             List<String> propertyNames = readOptionalList(TYPE, processorTag, config, "properties");
             boolean extractDeviceType = readBooleanProperty(TYPE, processorTag, config, "extract_device_type", false);
             boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false);
-            Object ecsValue = config.remove("ecs");
-            if (ecsValue != null) {
-                deprecationLogger.warn(
-                    DeprecationCategory.SETTINGS,
-                    "ingest_useragent_ecs_settings",
-                    "setting [ecs] is deprecated as ECS format is the default and only option"
-                );
-            }
 
             UserAgentParser parser = userAgentParsers.get(regexFilename);
             if (parser == null) {
@@ -272,4 +261,14 @@ public static Property parseProperty(String propertyName) {
             }
         }
     }
+
+    @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
+    // This can be removed in V10. It's not possible to create an instance with the ecs property in V9, and all instances created by V8 or
+    // earlier will have been fixed when upgraded to V9.
+    static boolean maybeUpgradeConfig(Map<String, Object> config) {
+        // Instances created using ES 8.x (or earlier) may have the 'ecs' config entry.
+        // This was ignored in 8.x and is unsupported in 9.0.
+        // In 9.x, we should remove it from any existing processors on startup.
+        return config.remove("ecs") != null;
+    }
 }
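
Net effect of the two files above: the `ecs` option is no longer tolerated at processor-creation time, and is instead stripped once from any configuration persisted by an 8.x cluster. A rough sketch of the cleanup semantics, assuming stored pipeline definitions deserialized into nested maps; the surrounding walk is hypothetical, only the remove-and-report-changed contract comes from `maybeUpgradeConfig`:

    import java.util.List;
    import java.util.Map;

    class UserAgentUpgradeSketch {
        // Returns true when at least one stored definition was rewritten and so
        // needs to be persisted again; mirrors maybeUpgradeConfig's contract.
        static boolean upgradeUserAgentProcessors(List<Map<String, Object>> configs) {
            boolean changed = false;
            for (Map<String, Object> config : configs) {
                changed |= config.remove("ecs") != null; // same check as maybeUpgradeConfig
            }
            return changed;
        }
    }
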
diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java
index d9459404987df..471015d579012 100644
--- a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java
+++ b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java
@@ -331,4 +331,18 @@ public void testExtractDeviceTypeDisabled() {
         device.put("name", "Other");
         assertThat(target.get("device"), is(device));
     }
+
+    public void testMaybeUpgradeConfig_removesEcsIfPresent() {
+        Map<String, Object> config = new HashMap<>(Map.of("field", "user-agent", "ecs", "whatever"));
+        boolean changed = UserAgentProcessor.maybeUpgradeConfig(config);
+        assertThat(changed, is(true));
+        assertThat(config, is(Map.of("field", "user-agent")));
+    }
+
+    public void testMaybeUpgradeConfig_doesNothingIfEcsAbsent() {
+        Map<String, Object> config = new HashMap<>(Map.of("field", "user-agent"));
+        boolean changed = UserAgentProcessor.maybeUpgradeConfig(config);
+        assertThat(changed, is(false));
+        assertThat(config, is(Map.of("field", "user-agent")));
+    }
 }
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt
index a739635e85a9c..875b9a1dac3e8 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt
@@ -132,6 +132,21 @@ class org.elasticsearch.script.field.SeqNoDocValuesField @dynamic_type {
 class org.elasticsearch.script.field.VersionDocValuesField @dynamic_type {
 }
 
+class org.elasticsearch.script.field.vectors.MultiDenseVector {
+  MultiDenseVector EMPTY
+  float[] getMagnitudes()
+
+  Iterator getVectors()
+  boolean isEmpty()
+  int getDims()
+  int size()
+}
+
+class org.elasticsearch.script.field.vectors.MultiDenseVectorDocValuesField {
+  MultiDenseVector get()
+  MultiDenseVector get(MultiDenseVector)
+}
+
 class org.elasticsearch.script.field.vectors.DenseVector {
   DenseVector EMPTY
   float getMagnitude()
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt
index 7ab9eb32852b6..b2db0d1006d40 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt
@@ -123,6 +123,11 @@ class org.elasticsearch.index.mapper.vectors.DenseVectorScriptDocValues {
   float getMagnitude()
 }
 
+class org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValues {
+  Iterator getVectorValues()
+  float[] getMagnitudes()
+}
+
 class org.apache.lucene.util.BytesRef {
   byte[] bytes
   int offset
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_multi_dense_vector_dv_fields_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_multi_dense_vector_dv_fields_api.yml
new file mode 100644
index 0000000000000..66cb3f3c46fcc
--- /dev/null
+++ 
b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_multi_dense_vector_dv_fields_api.yml @@ -0,0 +1,178 @@ +setup: + - requires: + capabilities: + - method: POST + path: /_search + capabilities: [ multi_dense_vector_script_access ] + test_runner_features: capabilities + reason: "Support for multi dense vector field script access capability required" + - skip: + features: headers + + - do: + indices.create: + index: test-index + body: + settings: + number_of_shards: 1 + mappings: + properties: + vector: + type: multi_dense_vector + dims: 5 + byte_vector: + type: multi_dense_vector + dims: 5 + element_type: byte + bit_vector: + type: multi_dense_vector + dims: 40 + element_type: bit + - do: + index: + index: test-index + id: "1" + body: + vector: [[230.0, 300.33, -34.8988, 15.555, -200.0], [-0.5, 100.0, -13, 14.8, -156.0]] + byte_vector: [[8, 5, -15, 1, -7], [-1, 115, -3, 4, -128]] + bit_vector: [[8, 5, -15, 1, -7], [-1, 115, -3, 4, -128]] + + - do: + index: + index: test-index + id: "3" + body: + vector: [[0.5, 111.3, -13.0, 14.8, -156.0]] + byte_vector: [[2, 18, -5, 0, -124]] + bit_vector: [[2, 18, -5, 0, -124]] + + - do: + indices.refresh: {} +--- +"Test vector magnitude equality": + - skip: + features: close_to + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "doc['vector'].magnitudes[0]" + + - match: {hits.total: 2} + + - match: {hits.hits.0._id: "1"} + - close_to: {hits.hits.0._score: {value: 429.6021, error: 0.01}} + + - match: {hits.hits.1._id: "3"} + - close_to: {hits.hits.1._score: {value: 192.6447, error: 0.01}} + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "doc['byte_vector'].magnitudes[0]" + + - match: {hits.total: 2} + + - match: {hits.hits.0._id: "3"} + - close_to: {hits.hits.0._score: {value: 125.41531, error: 0.01}} + + - match: {hits.hits.1._id: "1"} + - close_to: {hits.hits.1._score: {value: 19.07878, error: 0.01}} + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "doc['bit_vector'].magnitudes[0]" + + - match: {hits.total: 2} + + - match: {hits.hits.0._id: "1"} + - close_to: {hits.hits.0._score: {value: 3.872983, error: 0.01}} + + - match: {hits.hits.1._id: "3"} + - close_to: {hits.hits.1._score: {value: 3.464101, error: 0.01}} +--- +"Test vector value scoring": + - skip: + features: close_to + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "doc['vector'].vectorValues.next()[0];" + + - match: {hits.total: 2} + + - match: {hits.hits.0._id: "1"} + - close_to: {hits.hits.0._score: {value: 230, error: 0.01}} + + - match: {hits.hits.1._id: "3"} + - close_to: {hits.hits.1._score: {value: 0.5, error: 0.01}} + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "doc['byte_vector'].vectorValues.next()[0];" + + - match: {hits.total: 2} + + - match: {hits.hits.0._id: "1"} + - close_to: {hits.hits.0._score: {value: 8, error: 0.01}} + + - match: {hits.hits.1._id: "3"} + - close_to: {hits.hits.1._score: {value: 2, error: 0.01}} + + - do: + 
headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "doc['bit_vector'].vectorValues.next()[0];" + + - match: {hits.total: 2} + + - match: {hits.hits.0._id: "1"} + - close_to: {hits.hits.0._score: {value: 8, error: 0.01}} + + - match: {hits.hits.1._id: "3"} + - close_to: {hits.hits.1._score: {value: 2, error: 0.01}} diff --git a/modules/legacy-geo/build.gradle b/modules/legacy-geo/build.gradle index d936276362340..1b4fd9d52bbaf 100644 --- a/modules/legacy-geo/build.gradle +++ b/modules/legacy-geo/build.gradle @@ -26,7 +26,7 @@ dependencies { testImplementation project(":test:framework") } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle index 35842ad27643f..a7bdc11e15550 100644 --- a/modules/mapper-extras/build.gradle +++ b/modules/mapper-extras/build.gradle @@ -24,7 +24,7 @@ restResources { } } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 14a6b1e3f5b82..bb1500ba55664 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -132,7 +132,7 @@ if (OS.current() == OS.WINDOWS) { oldEsDependency.getAttributes().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); TaskProvider fixture = tasks.register("oldEs${version}Fixture", AntFixture) { dependsOn project.configurations.oldesFixture, jdks.legacy, oldEsDependency - executable = "${BuildParams.runtimeJavaHome}/bin/java" + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" env 'CLASSPATH', "${-> project.configurations.oldesFixture.asPath}" // old versions of Elasticsearch need JAVA_HOME env 'JAVA_HOME', jdks.legacy.javaHomePath diff --git a/modules/repository-azure/build.gradle b/modules/repository-azure/build.gradle index 86776e743685e..4babac68f1e71 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -321,7 +321,7 @@ tasks.register("workloadIdentityYamlRestTest", RestIntegTestTask) { // omitting key and sas_token so that we use a bearer token from workload identity } -if (BuildParams.inFipsJvm) { +if (buildParams.inFipsJvm) { // Cannot override the trust store in FIPS mode, and these tasks require a HTTPS fixture tasks.named("managedIdentityYamlRestTest").configure { enabled = false } tasks.named("workloadIdentityYamlRestTest").configure { enabled = false } diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java index 61940be247861..e049d4cd372e6 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java @@ -112,7 +112,7 @@ public void testThrottleResponsesAreCountedInMetrics() throws IOException { 
blobContainer.blobExists(purpose, blobName); // Correct metrics are recorded - metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES, repository).expectMetrics() + metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES).expectMetrics() .withRequests(numThrottles + 1) .withThrottles(numThrottles) .withExceptions(numThrottles) @@ -137,7 +137,7 @@ public void testRangeNotSatisfiedAreCountedInMetrics() throws IOException { assertThrows(RequestedRangeNotSatisfiedException.class, () -> blobContainer.readBlob(purpose, blobName)); // Correct metrics are recorded - metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB, repository).expectMetrics() + metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB).expectMetrics() .withRequests(1) .withThrottles(0) .withExceptions(1) @@ -170,7 +170,7 @@ public void testErrorResponsesAreCountedInMetrics() throws IOException { blobContainer.blobExists(purpose, blobName); // Correct metrics are recorded - metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES, repository).expectMetrics() + metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES).expectMetrics() .withRequests(numErrors + 1) .withThrottles(throttles.get()) .withExceptions(numErrors) @@ -191,7 +191,7 @@ public void testRequestFailuresAreCountedInMetrics() { assertThrows(IOException.class, () -> blobContainer.listBlobs(purpose)); // Correct metrics are recorded - metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.LIST_BLOBS, repository).expectMetrics() + metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.LIST_BLOBS).expectMetrics() .withRequests(4) .withThrottles(0) .withExceptions(4) @@ -322,20 +322,14 @@ private void clearMetrics(String discoveryNode) { .forEach(TestTelemetryPlugin::resetMeter); } - private MetricsAsserter metricsAsserter( - String dataNodeName, - OperationPurpose operationPurpose, - AzureBlobStore.Operation operation, - String repository - ) { - return new MetricsAsserter(dataNodeName, operationPurpose, operation, repository); + private MetricsAsserter metricsAsserter(String dataNodeName, OperationPurpose operationPurpose, AzureBlobStore.Operation operation) { + return new MetricsAsserter(dataNodeName, operationPurpose, operation); } private class MetricsAsserter { private final String dataNodeName; private final OperationPurpose purpose; private final AzureBlobStore.Operation operation; - private final String repository; enum Result { Success, @@ -361,11 +355,10 @@ List getMeasurements(TestTelemetryPlugin testTelemetryPlugin, Strin abstract List getMeasurements(TestTelemetryPlugin testTelemetryPlugin, String name); } - private MetricsAsserter(String dataNodeName, OperationPurpose purpose, AzureBlobStore.Operation operation, String repository) { + private MetricsAsserter(String dataNodeName, OperationPurpose purpose, AzureBlobStore.Operation operation) { this.dataNodeName = dataNodeName; this.purpose = purpose; this.operation = operation; - this.repository = repository; } private class Expectations { @@ -458,7 +451,6 @@ private void assertMatchingMetricRecorded(MetricType metricType, String metricNa .filter( m -> m.attributes().get("operation").equals(operation.getKey()) && m.attributes().get("purpose").equals(purpose.getKey()) - && m.attributes().get("repo_name").equals(repository) && m.attributes().get("repo_type").equals("azure") ) .findFirst() @@ -470,8 +462,6 @@ private void 
assertMatchingMetricRecorded(MetricType metricType, String metricNa + operation.getKey() + " and purpose=" + purpose.getKey() - + " and repo_name=" - + repository + " in " + measurements ) diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index bd21f208faac4..ab3f3ee4f3728 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -402,10 +402,7 @@ public void testMetrics() throws Exception { ) ); metrics.forEach(metric -> { - assertThat( - metric.attributes(), - allOf(hasEntry("repo_type", AzureRepository.TYPE), hasKey("repo_name"), hasKey("operation"), hasKey("purpose")) - ); + assertThat(metric.attributes(), allOf(hasEntry("repo_type", AzureRepository.TYPE), hasKey("operation"), hasKey("purpose"))); final AzureBlobStore.Operation operation = AzureBlobStore.Operation.fromKey((String) metric.attributes().get("operation")); final AzureBlobStore.StatsKey statsKey = new AzureBlobStore.StatsKey( operation, diff --git a/modules/repository-gcs/build.gradle b/modules/repository-gcs/build.gradle index 246611e4803a2..605d886a71056 100644 --- a/modules/repository-gcs/build.gradle +++ b/modules/repository-gcs/build.gradle @@ -178,7 +178,7 @@ tasks.named("thirdPartyAudit").configure { ) - if(BuildParams.graalVmRuntime == false) { + if(buildParams.graalVmRuntime == false) { ignoreMissingClasses( 'org.graalvm.nativeimage.hosted.Feature', 'org.graalvm.nativeimage.hosted.Feature$BeforeAnalysisAccess', @@ -240,7 +240,7 @@ def gcsThirdPartyTest = tasks.register("gcsThirdPartyUnitTest", Test) { systemProperty 'tests.security.manager', false systemProperty 'test.google.bucket', gcsBucket systemProperty 'test.google.fixture', Boolean.toString(useFixture) - nonInputProperties.systemProperty 'test.google.base', gcsBasePath + "_third_party_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.google.base', gcsBasePath + "_third_party_tests_" + buildParams.testSeed if (useFixture == false) { nonInputProperties.systemProperty 'test.google.account', "${-> encodedCredentials.call()}" } diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index 346a458a65f85..c1cd1a13719a7 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -12,6 +12,7 @@ import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin */ apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' +apply plugin: 'elasticsearch.internal-java-rest-test' esplugin { description 'The S3 repository plugin adds S3 repositories' @@ -48,6 +49,10 @@ dependencies { yamlRestTestImplementation project(':test:fixtures:minio-fixture') internalClusterTestImplementation project(':test:fixtures:minio-fixture') + javaRestTestImplementation project(":test:framework") + javaRestTestImplementation project(':test:fixtures:s3-fixture') + javaRestTestImplementation project(':modules:repository-s3') + yamlRestTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" internalClusterTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" } @@ -110,7 +115,7 @@ String s3ECSBasePath = 
System.getenv("amazon_s3_base_path_ecs") String s3STSBucket = System.getenv("amazon_s3_bucket_sts") String s3STSBasePath = System.getenv("amazon_s3_base_path_sts") -boolean s3DisableChunkedEncoding = BuildParams.random.nextBoolean() +boolean s3DisableChunkedEncoding = buildParams.random.nextBoolean() // If all these variables are missing then we are testing against the internal fixture instead, which has the following // credentials hard-coded in. @@ -198,7 +203,7 @@ tasks.register("s3ThirdPartyTest", Test) { systemProperty 'test.s3.account', s3PermanentAccessKey systemProperty 'test.s3.key', s3PermanentSecretKey systemProperty 'test.s3.bucket', s3PermanentBucket - nonInputProperties.systemProperty 'test.s3.base', s3PermanentBasePath + "_third_party_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.s3.base', s3PermanentBasePath + "_third_party_tests_" + buildParams.testSeed } tasks.named("thirdPartyAudit").configure { diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 6b4dd5ed86e2d..d9480abf21687 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -188,7 +188,10 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { } @Override - @TestIssueLogging(issueUrl = "https://github.com/elastic/elasticsearch/issues/88841", value = "com.amazonaws.request:DEBUG") + @TestIssueLogging( + issueUrl = "https://github.com/elastic/elasticsearch/issues/88841", + value = "com.amazonaws.request:DEBUG,com.amazonaws.http.AmazonHttpClient:TRACE" + ) public void testRequestStats() throws Exception { super.testRequestStats(); } @@ -234,7 +237,10 @@ public void testAbortRequestStats() throws Exception { assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts); } - @TestIssueLogging(issueUrl = "https://github.com/elastic/elasticsearch/issues/101608", value = "com.amazonaws.request:DEBUG") + @TestIssueLogging( + issueUrl = "https://github.com/elastic/elasticsearch/issues/101608", + value = "com.amazonaws.request:DEBUG,com.amazonaws.http.AmazonHttpClient:TRACE" + ) public void testMetrics() throws Exception { // Create the repository and perform some activities final String repository = createRepository(randomRepositoryName(), false); @@ -294,10 +300,7 @@ public void testMetrics() throws Exception { ) ); metrics.forEach(metric -> { - assertThat( - metric.attributes(), - allOf(hasEntry("repo_type", S3Repository.TYPE), hasKey("repo_name"), hasKey("operation"), hasKey("purpose")) - ); + assertThat(metric.attributes(), allOf(hasEntry("repo_type", S3Repository.TYPE), hasKey("operation"), hasKey("purpose"))); final S3BlobStore.Operation operation = S3BlobStore.Operation.parse((String) metric.attributes().get("operation")); final S3BlobStore.StatsKey statsKey = new S3BlobStore.StatsKey( operation, diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestIT.java new file mode 100644 index 0000000000000..ead2cb36ad150 --- /dev/null +++ 
b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestIT.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.repositories.s3;
+
+import fixture.s3.S3HttpFixture;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.MutableSettingsProvider;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+import java.io.IOException;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.equalTo;
+
+public class RepositoryS3RestIT extends ESRestTestCase {
+
+    private static final String BUCKET = "RepositoryS3JavaRestTest-bucket";
+    private static final String BASE_PATH = "RepositoryS3JavaRestTest-base-path";
+
+    public static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, "ignored");
+
+    private static final MutableSettingsProvider keystoreSettings = new MutableSettingsProvider();
+
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .module("repository-s3")
+        .keystore(keystoreSettings)
+        .setting("s3.client.default.endpoint", s3Fixture::getAddress)
+        .build();
+
+    @ClassRule
+    public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/116811")
+    public void testReloadCredentialsFromKeystore() throws IOException {
+        // Register repository (?verify=false because we don't have access to the blob store yet)
+        final var repositoryName = randomIdentifier();
+        registerRepository(
+            repositoryName,
+            S3Repository.TYPE,
+            false,
+            Settings.builder().put("bucket", BUCKET).put("base_path", BASE_PATH).build()
+        );
+        final var verifyRequest = new Request("POST", "/_snapshot/" + repositoryName + "/_verify");
+
+        // Set up initial credentials
+        final var accessKey1 = randomIdentifier();
+        s3Fixture.setAccessKey(accessKey1);
+        keystoreSettings.put("s3.client.default.access_key", accessKey1);
+        keystoreSettings.put("s3.client.default.secret_key", randomIdentifier());
+        cluster.updateStoredSecureSettings();
+        assertOK(client().performRequest(new Request("POST", "/_nodes/reload_secure_settings")));
+
+        // Check access using initial credentials
+        assertOK(client().performRequest(verifyRequest));
+
+        // Rotate credentials in blob store
+        final var accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier);
+        s3Fixture.setAccessKey(accessKey2);
+
+        // Ensure that the initial credentials are now invalid
+        final var accessDeniedException2 = expectThrows(ResponseException.class, () -> client().performRequest(verifyRequest));
+        assertThat(accessDeniedException2.getResponse().getStatusLine().getStatusCode(), equalTo(500));
+        assertThat(
+            accessDeniedException2.getMessage(),
+            allOf(containsString("Bad access key"), containsString("Status Code: 403"), containsString("Error Code: AccessDenied"))
+        );
+
+        // Set up refreshed credentials
+        keystoreSettings.put("s3.client.default.access_key", accessKey2);
+        cluster.updateStoredSecureSettings();
+        assertOK(client().performRequest(new Request("POST", "/_nodes/reload_secure_settings")));
+
+        // Check access using refreshed credentials
+        assertOK(client().performRequest(verifyRequest));
+    }
+
+}
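
The test above pins down the operational sequence for rotating S3 credentials without restarting nodes. Distilled, with every call taken verbatim from the test; `newAccessKey` is a placeholder:

    // 1. stage the new key in the keystore settings provider
    keystoreSettings.put("s3.client.default.access_key", newAccessKey);
    // 2. rewrite the stored keystore for every node in the test cluster
    cluster.updateStoredSecureSettings();
    // 3. hot-reload secure settings on all nodes
    client().performRequest(new Request("POST", "/_nodes/reload_secure_settings"));
    // 4. repository verification succeeds again with the new credentials
    client().performRequest(verifyRequest);
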
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java
index da357dc09ab95..7407522651e55 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java
@@ -327,8 +327,6 @@ private Map<String, Object> metricAttributes(String action) {
         return Map.of(
             "repo_type",
             S3Repository.TYPE,
-            "repo_name",
-            blobStore.getRepositoryMetadata().name(),
             "operation",
             Operation.GET_OBJECT.getKey(),
             "purpose",
diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java
index b292dc5872994..ac49cffc1e0da 100644
--- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java
+++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java
@@ -1106,7 +1106,7 @@ private List<Measurement> getRetryHistogramMeasurements() {
     }
 
     private Map<String, Object> metricAttributes(String action) {
-        return Map.of("repo_type", "s3", "repo_name", "repository", "operation", "GetObject", "purpose", "Indices", "action", action);
+        return Map.of("repo_type", "s3", "operation", "GetObject", "purpose", "Indices", "action", action);
     }
 
     /**
diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle
index 8dc718a818cec..13dfdf2b3c7bc 100644
--- a/modules/transport-netty4/build.gradle
+++ b/modules/transport-netty4/build.gradle
@@ -177,9 +177,8 @@ tasks.named("thirdPartyAudit").configure {
       'com.google.protobuf.nano.CodedOutputByteBufferNano',
      'com.google.protobuf.nano.MessageNano',
      'com.github.luben.zstd.Zstd',
-      'com.github.luben.zstd.BaseZstdBufferDecompressingStreamNoFinalizer',
-      'com.github.luben.zstd.ZstdBufferDecompressingStreamNoFinalizer',
-      'com.github.luben.zstd.ZstdDirectBufferDecompressingStreamNoFinalizer',
+      'com.github.luben.zstd.ZstdInputStreamNoFinalizer',
+      'com.github.luben.zstd.util.Native',
      'com.jcraft.jzlib.Deflater',
      'com.jcraft.jzlib.Inflater',
      'com.jcraft.jzlib.JZlib$WrapperType',
@@ -231,8 +230,14 @@ tasks.named("thirdPartyAudit").configure {
      'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField',
      'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField',
      'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField',
+      'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField',
+      'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField',
+      'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField',
+      'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField',
+      'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField',
      'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess',
      'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess',
+      'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess',
      'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
      'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1',
      'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2',
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java
index 647d38c626c74..3095139ca4685 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java
@@ -174,7 +174,7 @@ public void testClientConnectionCloseMidStream() throws Exception {
 
         // await stream handler is ready and request full content
         var handler = ctx.awaitRestChannelAccepted(opaqueId);
-        assertBusy(() -> assertNotNull(handler.stream.buf()));
+        assertBusy(() -> assertNotEquals(0, handler.stream.bufSize()));
         assertFalse(handler.streamClosed);
 
@@ -187,7 +187,7 @@ public void testClientConnectionCloseMidStream() throws Exception {
 
         // wait for resources to be released
         assertBusy(() -> {
-            assertNull(handler.stream.buf());
+            assertEquals(0, handler.stream.bufSize());
             assertTrue(handler.streamClosed);
         });
     }
@@ -204,13 +204,13 @@ public void testServerCloseConnectionMidStream() throws Exception {
 
         // await stream handler is ready and request full content
         var handler = ctx.awaitRestChannelAccepted(opaqueId);
-        assertBusy(() -> assertNotNull(handler.stream.buf()));
+        assertBusy(() -> assertNotEquals(0, handler.stream.bufSize()));
         assertFalse(handler.streamClosed);
 
         // terminate connection on server and wait resources are released
         handler.channel.request().getHttpChannel().close();
         assertBusy(() -> {
-            assertNull(handler.stream.buf());
+            assertEquals(0, handler.stream.bufSize());
             assertTrue(handler.streamClosed);
         });
     }
@@ -226,14 +226,14 @@ public void testServerExceptionMidStream() throws Exception {
 
         // await stream handler is ready and request full content
         var handler = ctx.awaitRestChannelAccepted(opaqueId);
-        assertBusy(() -> assertNotNull(handler.stream.buf()));
+        assertBusy(() -> assertNotEquals(0, handler.stream.bufSize()));
         assertFalse(handler.streamClosed);
 
         handler.shouldThrowInsideHandleChunk = true;
         handler.stream.next();
 
         assertBusy(() -> {
-            assertNull(handler.stream.buf());
+            assertEquals(0, handler.stream.bufSize());
             assertTrue(handler.streamClosed);
         });
     }
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java
index 238faa7a9237e..ac3e3aecf97b9 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java
@@ -37,12 +37,15 @@ public class Netty4HttpRequestBodyStream implements HttpBody.Stream {
     private final List<HttpBody.ChunkHandler> 
tracingHandlers = new ArrayList<>(4); private final ThreadContext threadContext; private ByteBuf buf; - private boolean hasLast = false; private boolean requested = false; private boolean closing = false; private HttpBody.ChunkHandler handler; private ThreadContext.StoredContext requestContext; + // used in tests + private volatile int bufSize = 0; + private volatile boolean hasLast = false; + public Netty4HttpRequestBodyStream(Channel channel, ThreadContext threadContext) { this.channel = channel; this.threadContext = threadContext; @@ -112,11 +115,12 @@ private void addChunk(ByteBuf chunk) { comp.addComponent(true, chunk); buf = comp; } + bufSize = buf.readableBytes(); } // visible for test - ByteBuf buf() { - return buf; + int bufSize() { + return bufSize; } // visible for test @@ -130,6 +134,7 @@ private void send() { var bytesRef = Netty4Utils.toReleasableBytesReference(buf); requested = false; buf = null; + bufSize = 0; try (var ignored = threadContext.restoreExistingContext(requestContext)) { for (var tracer : tracingHandlers) { tracer.onNext(bytesRef, hasLast); @@ -164,6 +169,7 @@ private void doClose() { if (buf != null) { buf.release(); buf = null; + bufSize = 0; } channel.config().setAutoRead(true); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java index 5ff5a27e2d551..d456bbecfbd20 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java @@ -67,7 +67,7 @@ public void testEnqueueChunksBeforeRequest() { for (int i = 0; i < totalChunks; i++) { channel.writeInbound(randomContent(1024)); } - assertEquals(totalChunks * 1024, stream.buf().readableBytes()); + assertEquals(totalChunks * 1024, stream.bufSize()); } // ensures all received chunks can be flushed downstream @@ -119,7 +119,7 @@ public void testReadFromChannel() { channel.writeInbound(randomLastContent(chunkSize)); for (int i = 0; i < totalChunks; i++) { - assertNull("should not enqueue chunks", stream.buf()); + assertEquals("should not enqueue chunks", 0, stream.bufSize()); stream.next(); channel.runPendingTasks(); assertEquals("each next() should produce single chunk", i + 1, gotChunks.size()); diff --git a/muted-tests.yml b/muted-tests.yml index f49b303a2bc50..625813642eb60 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -11,15 +11,9 @@ tests: - class: org.elasticsearch.xpack.restart.FullClusterRestartIT method: testDataStreams {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111448 -- class: org.elasticsearch.upgrades.FullClusterRestartIT - method: testSnapshotRestore {cluster=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/111798 - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} - issue: https://github.com/elastic/elasticsearch/issues/112191 - class: org.elasticsearch.xpack.esql.action.ManyShardsIT method: testRejection issue: https://github.com/elastic/elasticsearch/issues/112406 @@ -100,9 +94,6 @@ tests: - class: 
org.elasticsearch.xpack.inference.TextEmbeddingCrudIT method: testPutE5Small_withPlatformSpecificVariant issue: https://github.com/elastic/elasticsearch/issues/113950 -- class: org.elasticsearch.xpack.inference.InferenceCrudIT - method: testGet - issue: https://github.com/elastic/elasticsearch/issues/114135 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/rest-api/usage/line_38} issue: https://github.com/elastic/elasticsearch/issues/113694 @@ -142,9 +133,6 @@ tests: - class: org.elasticsearch.search.SearchServiceTests method: testParseSourceValidation issue: https://github.com/elastic/elasticsearch/issues/115936 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test delete given model referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/115970 - class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT method: testReindexWithShutdown issue: https://github.com/elastic/elasticsearch/issues/115996 @@ -156,60 +144,27 @@ tests: - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT method: test {p0=cat.shards/10_basic/Help} issue: https://github.com/elastic/elasticsearch/issues/116110 -- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT - method: testSnapshotRestore {cluster=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/111799 -- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT - method: testSnapshotRestore {cluster=OLD} - issue: https://github.com/elastic/elasticsearch/issues/111774 -- class: org.elasticsearch.upgrades.FullClusterRestartIT - method: testSnapshotRestore {cluster=OLD} - issue: https://github.com/elastic/elasticsearch/issues/111777 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test delete given model with alias referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116133 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113054 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize ASYNC} issue: https://github.com/elastic/elasticsearch/issues/113055 -- class: org.elasticsearch.xpack.inference.InferenceRestIT - method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} - issue: https://github.com/elastic/elasticsearch/issues/114376 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test force delete given model with alias referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116136 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Test start already started transform} issue: https://github.com/elastic/elasticsearch/issues/98802 - class: org.elasticsearch.action.search.SearchPhaseControllerTests method: testProgressListener issue: https://github.com/elastic/elasticsearch/issues/116149 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/forecast/Test forecast unknown job} - issue: https://github.com/elastic/elasticsearch/issues/116150 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test 
{p0=terms_enum/10_basic/Test security} issue: https://github.com/elastic/elasticsearch/issues/116178 - class: org.elasticsearch.search.basic.SearchWithRandomDisconnectsIT method: testSearchWithRandomDisconnects issue: https://github.com/elastic/elasticsearch/issues/116175 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/start_stop_datafeed/Test start datafeed given index pattern with no matching indices} - issue: https://github.com/elastic/elasticsearch/issues/116220 -- class: org.elasticsearch.search.basic.SearchWhileRelocatingIT - method: testSearchAndRelocateConcurrentlyRandomReplicas - issue: https://github.com/elastic/elasticsearch/issues/116145 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/filter_crud/Test update filter} - issue: https://github.com/elastic/elasticsearch/issues/116271 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/get_datafeeds/Test explicit get all datafeeds} - issue: https://github.com/elastic/elasticsearch/issues/116284 - class: org.elasticsearch.xpack.deprecation.DeprecationHttpIT method: testDeprecatedSettingsReturnWarnings issue: https://github.com/elastic/elasticsearch/issues/108628 @@ -219,9 +174,6 @@ tests: - class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests method: testBottomFieldSort issue: https://github.com/elastic/elasticsearch/issues/116249 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} - issue: https://github.com/elastic/elasticsearch/issues/116332 - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testAllocationPreventedForRemoval issue: https://github.com/elastic/elasticsearch/issues/116363 @@ -231,24 +183,9 @@ tests: - class: org.elasticsearch.threadpool.SimpleThreadPoolIT method: testThreadPoolMetrics issue: https://github.com/elastic/elasticsearch/issues/108320 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/jobs_crud/Test put job deprecated bucket span} - issue: https://github.com/elastic/elasticsearch/issues/116419 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/explain_data_frame_analytics/Test both job id and body} - issue: https://github.com/elastic/elasticsearch/issues/116433 -- class: org.elasticsearch.smoketest.MlWithSecurityIT - method: test {yaml=ml/inference_crud/Test force delete given model with alias referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116443 - class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT method: testILMDownsampleRollingRestart issue: https://github.com/elastic/elasticsearch/issues/114233 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/data_frame_analytics_crud/Test put config with unknown field in outlier detection analysis} - issue: https://github.com/elastic/elasticsearch/issues/116458 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/evaluate_data_frame/Test outlier_detection with query} - issue: https://github.com/elastic/elasticsearch/issues/116484 - class: org.elasticsearch.xpack.kql.query.KqlQueryBuilderTests issue: https://github.com/elastic/elasticsearch/issues/116487 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests @@ -257,15 +194,60 @@ tests: - class: org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsCanMatchOnCoordinatorIntegTests method: 
testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQueryingAnyNodeWhenTheyAreOutsideOfTheQueryRange issue: https://github.com/elastic/elasticsearch/issues/116523 -- class: org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionsTests - method: testCollapseAndRemoveUnsupportedPrivileges - issue: https://github.com/elastic/elasticsearch/issues/116520 - class: org.elasticsearch.xpack.logsdb.qa.StandardVersusLogsIndexModeRandomDataDynamicMappingChallengeRestIT method: testMatchAllQuery issue: https://github.com/elastic/elasticsearch/issues/116536 +- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT + method: test {categorize.Categorize} + issue: https://github.com/elastic/elasticsearch/issues/116434 +- class: org.elasticsearch.upgrades.SearchStatesIT + method: testBWCSearchStates + issue: https://github.com/elastic/elasticsearch/issues/116617 +- class: org.elasticsearch.upgrades.SearchStatesIT + method: testCanMatch + issue: https://github.com/elastic/elasticsearch/issues/116618 +- class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT + method: testSettingsApplied + issue: https://github.com/elastic/elasticsearch/issues/116694 +- class: org.elasticsearch.snapshots.SnapshotShutdownIT + method: testRestartNodeDuringSnapshot + issue: https://github.com/elastic/elasticsearch/issues/116730 +- class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryGroupsResolverTests + issue: https://github.com/elastic/elasticsearch/issues/116182 - class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test force delete given model referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116555 + method: test {p0=snapshot/20_operator_privileges_disabled/Operator only settings can be set and restored by non-operator user when operator privileges is disabled} + issue: https://github.com/elastic/elasticsearch/issues/116775 +- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT + method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} + issue: https://github.com/elastic/elasticsearch/issues/116777 +- class: org.elasticsearch.repositories.s3.RepositoryS3RestIT + method: testReloadCredentialsFromKeystore + issue: https://github.com/elastic/elasticsearch/issues/116811 +- class: org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT + issue: https://github.com/elastic/elasticsearch/issues/116851 +- class: org.elasticsearch.xpack.esql.analysis.VerifierTests + method: testCategorizeWithinAggregations + issue: https://github.com/elastic/elasticsearch/issues/116856 +- class: org.elasticsearch.xpack.esql.analysis.VerifierTests + method: testCategorizeSingleGrouping + issue: https://github.com/elastic/elasticsearch/issues/116857 +- class: org.elasticsearch.xpack.esql.analysis.VerifierTests + method: testCategorizeNestedGrouping + issue: https://github.com/elastic/elasticsearch/issues/116858 +- class: org.elasticsearch.search.basic.SearchWithRandomIOExceptionsIT + method: testRandomDirectoryIOExceptions + issue: https://github.com/elastic/elasticsearch/issues/114824 +- class: org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValuesTests + method: testFloatGetVectorValueAndGetMagnitude + issue: https://github.com/elastic/elasticsearch/issues/116863 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default 
ELSER 2 endpoint} + issue: https://github.com/elastic/elasticsearch/issues/116542 +- class: org.elasticsearch.compute.lucene.LuceneQueryExpressionEvaluatorTests + method: testTermQuery + issue: https://github.com/elastic/elasticsearch/issues/116879 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + issue: https://github.com/elastic/elasticsearch/issues/116899 # Examples: # diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 40b12c46c0bfe..f9245ed32c325 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -28,7 +28,7 @@ dependencies { api "com.ibm.icu:icu4j:${versions.icu4j}" } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 6eb5b574b88f9..16786c6c31074 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -63,7 +63,7 @@ TaskProvider createKey = tasks.register("createKey", LoggedExec) { keystore.parentFile.mkdirs() } outputs.file(keystore).withPropertyName('keystoreFile') - executable = "${BuildParams.runtimeJavaHome}/bin/keytool" + executable = "${buildParams.runtimeJavaHome.get()}/bin/keytool" getStandardInput().set('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n') args '-genkey', '-alias', 'test-node', diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index d9e86315d9468..a166a89ad4026 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -57,7 +57,7 @@ tasks.register("writeTestJavaPolicy") { throw new GradleException("failed to create temporary directory [${tmp}]") } final File javaPolicy = file("${tmp}/java.policy") - if (BuildParams.inFipsJvm) { + if (buildParams.inFipsJvm) { javaPolicy.write( [ "grant {", @@ -98,7 +98,7 @@ tasks.named("test").configure { // this is needed to manipulate com.amazonaws.sdk.ec2MetadataServiceEndpointOverride system property // it is better rather disable security manager at all with `systemProperty 'tests.security.manager', 'false'` - if (BuildParams.inFipsJvm){ + if (buildParams.inFipsJvm){ nonInputProperties.systemProperty 'java.security.policy', "=file://${buildDir}/tmp/java.policy" } else { nonInputProperties.systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index 5cdcdc59cafe9..aad59be376262 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -56,7 +56,7 @@ tasks.named("yamlRestTest").configure { enabled = false } TaskProvider fixture = tasks.register("ec2Fixture${action}", AntFixture) { dependsOn project.sourceSets.yamlRestTest.runtimeClasspath env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}" - executable = "${BuildParams.runtimeJavaHome}/bin/java" + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" args 'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, "${buildDir}/testclusters/yamlRestTest${action}-1/config/unicast_hosts.txt" } diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle index 14a904e107188..a22678b9a67dc 100644 --- 
a/plugins/discovery-gce/qa/gce/build.gradle +++ b/plugins/discovery-gce/qa/gce/build.gradle @@ -32,7 +32,7 @@ restResources { def gceFixtureProvider = tasks.register("gceFixture", AntFixture) { dependsOn project.sourceSets.yamlRestTest.runtimeClasspath env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}" - executable = "${BuildParams.runtimeJavaHome}/bin/java" + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" args 'org.elasticsearch.cloud.gce.GCEFixture', baseDir, "${buildDir}/testclusters/yamlRestTest-1/config/unicast_hosts.txt" } diff --git a/plugins/mapper-annotated-text/build.gradle b/plugins/mapper-annotated-text/build.gradle index d0b1163970616..545dfe49bfcf3 100644 --- a/plugins/mapper-annotated-text/build.gradle +++ b/plugins/mapper-annotated-text/build.gradle @@ -16,7 +16,7 @@ esplugin { classname 'org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextPlugin' } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/plugins/mapper-murmur3/build.gradle b/plugins/mapper-murmur3/build.gradle index 0fa710c130a29..e5108814154a3 100644 --- a/plugins/mapper-murmur3/build.gradle +++ b/plugins/mapper-murmur3/build.gradle @@ -22,7 +22,7 @@ dependencies { testImplementation project(':modules:lang-painless') } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java index 3c47a43788b48..c2251910c3122 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java @@ -25,6 +25,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; @@ -136,14 +137,11 @@ public void testWildCardWithFieldsWhenDisabled() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=false")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; indexRandom(true, prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - assertResponse( + assertResponses( + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")), prepareSearch("test").addFetchField("_size"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) - ); - - assertResponse( prepareSearch("test").addFetchField("*"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + prepareSearch("test").addStoredField("*") ); assertResponse( @@ -156,19 +154,11 @@ public void testWildCardWithFieldsWhenNotProvided() throws Exception { assertAcked(prepareCreate("test")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; indexRandom(true, 
prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - assertResponse( + assertResponses( + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")), prepareSearch("test").addFetchField("_size"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) - ); - - assertResponse( prepareSearch("test").addFetchField("*"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) - ); - - assertResponse( - prepareSearch("test").addStoredField("*"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + prepareSearch("test").addStoredField("*") ); } } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 741542477e446..b7f7816a3a0e1 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -84,7 +84,7 @@ tasks.named("dependencyLicenses").configure { tasks.withType(RestIntegTestTask).configureEach { usesDefaultDistribution() - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } diff --git a/plugins/repository-hdfs/hadoop-client-api/build.gradle b/plugins/repository-hdfs/hadoop-client-api/build.gradle index 4ac6f79530fcb..24e4213780fe2 100644 --- a/plugins/repository-hdfs/hadoop-client-api/build.gradle +++ b/plugins/repository-hdfs/hadoop-client-api/build.gradle @@ -1,5 +1,5 @@ apply plugin: 'elasticsearch.build' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' dependencies { implementation "org.apache.hadoop:hadoop-client-api:${project.parent.versions.hadoop}" diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index 585124f223c9c..ce5b840e6dc91 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -16,7 +16,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' apply plugin: 'elasticsearch.rest-resources' -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> /** * We execute tests 3 times. @@ -52,7 +52,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) } - onlyIf("FIPS mode disabled") { BuildParams.inFipsJvm == false } + onlyIf("FIPS mode disabled") { buildParams.inFipsJvm == false } } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { diff --git a/qa/entitlements/build.gradle b/qa/entitlements/build.gradle new file mode 100644 index 0000000000000..9a5058a3b11ac --- /dev/null +++ b/qa/entitlements/build.gradle @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +apply plugin: 'elasticsearch.base-internal-es-plugin' +apply plugin: 'elasticsearch.internal-java-rest-test' +// Necessary to use tests in Serverless +apply plugin: 'elasticsearch.internal-test-artifact' + +esplugin { + name 'entitlement-qa' + description 'A test module that triggers entitlement checks' + classname 'org.elasticsearch.test.entitlements.EntitlementsCheckPlugin' +} + +dependencies { + clusterPlugins project(':qa:entitlements') +} + +tasks.named("javadoc").configure { + // There seems to be some problem generating javadoc on a QA project that has a module definition + enabled = false +} + diff --git a/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java b/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java new file mode 100644 index 0000000000000..8b3629527f918 --- /dev/null +++ b/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.test.entitlements; + +import org.elasticsearch.client.Request; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.ClassRule; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +public class EntitlementsIT extends ESRestTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .plugin("entitlement-qa") + .systemProperty("es.entitlements.enabled", "true") + .setting("xpack.security.enabled", "false") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testCheckSystemExit() { + var exception = expectThrows( + IOException.class, + () -> { client().performRequest(new Request("GET", "/_entitlement/_check_system_exit")); } + ); + assertThat(exception.getMessage(), containsString("not_entitled_exception")); + } +} diff --git a/qa/entitlements/src/main/java/module-info.java b/qa/entitlements/src/main/java/module-info.java new file mode 100644 index 0000000000000..cf33ff95d834c --- /dev/null +++ b/qa/entitlements/src/main/java/module-info.java @@ -0,0 +1,5 @@ +module elasticsearch.qa.entitlements { + requires org.elasticsearch.server; + requires org.elasticsearch.base; + requires org.apache.logging.log4j; +} diff --git a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java new file mode 100644 index 0000000000000..f3821c065eceb --- /dev/null +++ b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +package org.elasticsearch.test.entitlements; + +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; + +import java.util.Collections; +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class EntitlementsCheckPlugin extends Plugin implements ActionPlugin { + + @Override + @SuppressForbidden(reason = "Specifically testing System.exit") + public List getRestHandlers( + final Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + final RestController restController, + final ClusterSettings clusterSettings, + final IndexScopedSettings indexScopedSettings, + final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return Collections.singletonList(new RestEntitlementsCheckSystemExitAction()); + } +} diff --git a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java new file mode 100644 index 0000000000000..692c8728cbda0 --- /dev/null +++ b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */
+
+package org.elasticsearch.test.entitlements;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestRequest;
+
+import java.util.List;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestEntitlementsCheckSystemExitAction extends BaseRestHandler {
+
+    private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckSystemExitAction.class);
+
+    RestEntitlementsCheckSystemExitAction() {}
+
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(GET, "/_entitlement/_check_system_exit"));
+    }
+
+    @Override
+    public String getName() {
+        return "check_system_exit_action";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
+        logger.info("RestEntitlementsCheckSystemExitAction rest handler");
+        return channel -> {
+            logger.info("Calling System.exit(123);");
+            System.exit(123);
+        };
+    }
+}
diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle
index 8d950eea616d6..5e68c4d1ad26b 100644
--- a/qa/full-cluster-restart/build.gradle
+++ b/qa/full-cluster-restart/build.gradle
@@ -7,14 +7,13 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */

-import org.elasticsearch.gradle.internal.info.BuildParams
 import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask

 apply plugin: 'elasticsearch.internal-java-rest-test'
 apply plugin: 'elasticsearch.internal-test-artifact'
 apply plugin: 'elasticsearch.bwc-test'

-BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName ->
+buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName ->
   tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) {
     usesBwcDistribution(bwcVersion)
     systemProperty("tests.old_cluster_version", bwcVersion)
diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartArchivedSettingsIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartArchivedSettingsIT.java
new file mode 100644
index 0000000000000..caa57f1e605a2
--- /dev/null
+++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartArchivedSettingsIT.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.upgrades;
+
+import io.netty.handler.codec.http.HttpMethod;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.WarningsHandler;
+import org.elasticsearch.core.UpdateForV10;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.FeatureFlag;
+import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
+import org.elasticsearch.test.rest.ObjectPath;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestRule;
+
+import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.THRESHOLD_SETTING;
+
+/**
+ * Tests to run before and after a full cluster restart. This is run twice,
+ * once with {@code tests.is_old_cluster} set to {@code true} against a cluster
+ * of an older version. The cluster is shut down and a cluster of the new
+ * version is started with the same data directories and then this is rerun
+ * with {@code tests.is_old_cluster} set to {@code false}.
+ */
+public class FullClusterRestartArchivedSettingsIT extends ParameterizedFullClusterRestartTestCase {
+
+    private static TemporaryFolder repoDirectory = new TemporaryFolder();
+
+    protected static LocalClusterConfigProvider clusterConfig = c -> {};
+
+    private static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .distribution(DistributionType.DEFAULT)
+        .version(getOldClusterTestVersion())
+        .nodes(2)
+        .setting("path.repo", () -> repoDirectory.getRoot().getPath())
+        .setting("xpack.security.enabled", "false")
+        // some tests rely on the translog not being flushed
+        .setting("indices.memory.shard_inactive_time", "60m")
+        .apply(() -> clusterConfig)
+        .feature(FeatureFlag.TIME_SERIES_MODE)
+        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
+        .build();
+
+    @ClassRule
+    public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster);
+
+    public FullClusterRestartArchivedSettingsIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) {
+        super(upgradeStatus);
+    }
+
+    @Override
+    protected ElasticsearchCluster getUpgradeCluster() {
+        return cluster;
+    }
+
+    @UpdateForV10(owner = UpdateForV10.Owner.DISTRIBUTED_COORDINATION) // this test is just about v8->v9 upgrades, remove it in v10
+    public void testBalancedShardsAllocatorThreshold() throws Exception {
+        assumeTrue("test only applies for v8->v9 upgrades", getOldClusterTestVersion().getMajor() == 8);
+
+        final var chosenValue = randomFrom("0", "0.1", "0.5", "0.999");
+
+        if (isRunningAgainstOldCluster()) {
+            final var request = newXContentRequest(
+                HttpMethod.PUT,
+                "/_cluster/settings",
+                (builder, params) -> builder.startObject("persistent").field(THRESHOLD_SETTING.getKey(), chosenValue).endObject()
+            );
+            request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE));
+            assertOK(client().performRequest(request));
+        }
+
+        final var clusterSettingsResponse = ObjectPath.createFromResponse(
+            client().performRequest(new Request("GET", "/_cluster/settings"))
+        );
+
+        final var settingsPath = "persistent."
+ THRESHOLD_SETTING.getKey(); + final var settingValue = clusterSettingsResponse.evaluate(settingsPath); + + if (isRunningAgainstOldCluster()) { + assertEquals(chosenValue, settingValue); + } else { + assertNull(settingValue); + assertNotNull(clusterSettingsResponse.evaluate("persistent.archived." + THRESHOLD_SETTING.getKey())); + } + } +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index fcca3f9a4700c..26e4f3146da2f 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -16,11 +16,9 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.Build; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.common.Strings; @@ -29,7 +27,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -75,12 +72,12 @@ import static java.util.stream.Collectors.toList; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; -import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.THRESHOLD_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -90,6 +87,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; /** * Tests to run before and after a full cluster restart. 
This is run twice,
@@ -1277,12 +1275,16 @@ private void checkSnapshot(String snapshotName, int count, String tookOnVersion,
         assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", snapResponse));
         assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", snapResponse));
         // the format can change depending on the ES node version running & this test code running
+        // and if there's an in-progress release that hasn't been published yet,
+        // which could affect the top range of the index release version
+        String firstReleaseVersion = tookOnIndexVersion.toReleaseVersion().split("-")[0];
         assertThat(
-            XContentMapValues.extractValue("snapshots.version", snapResponse),
+            (Iterable<String>) XContentMapValues.extractValue("snapshots.version", snapResponse),
             anyOf(
-                equalTo(List.of(tookOnVersion)),
-                equalTo(List.of(tookOnIndexVersion.toString())),
-                equalTo(List.of(tookOnIndexVersion.toReleaseVersion()))
+                contains(tookOnVersion),
+                contains(tookOnIndexVersion.toString()),
+                contains(firstReleaseVersion),
+                contains(startsWith(firstReleaseVersion + "-"))
             )
         );
@@ -1953,35 +1955,4 @@ public static void assertNumHits(String index, int numHits, int totalShards) thr
         assertThat(XContentMapValues.extractValue("_shards.successful", resp), equalTo(totalShards));
         assertThat(extractTotalHits(resp), equalTo(numHits));
     }
-
-    @UpdateForV10(owner = UpdateForV10.Owner.DISTRIBUTED_COORDINATION) // this test is just about v8->v9 upgrades, remove it in v10
-    public void testBalancedShardsAllocatorThreshold() throws Exception {
-        assumeTrue("test only applies for v8->v9 upgrades", getOldClusterTestVersion().getMajor() == 8);
-
-        final var chosenValue = randomFrom("0", "0.1", "0.5", "0.999");
-
-        if (isRunningAgainstOldCluster()) {
-            final var request = newXContentRequest(
-                HttpMethod.PUT,
-                "/_cluster/settings",
-                (builder, params) -> builder.startObject("persistent").field(THRESHOLD_SETTING.getKey(), chosenValue).endObject()
-            );
-            request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE));
-            assertOK(client().performRequest(request));
-        }
-
-        final var clusterSettingsResponse = ObjectPath.createFromResponse(
-            client().performRequest(new Request("GET", "/_cluster/settings"))
-        );
-
-        final var settingsPath = "persistent." + THRESHOLD_SETTING.getKey();
-        final var settingValue = clusterSettingsResponse.evaluate(settingsPath);
-
-        if (isRunningAgainstOldCluster()) {
-            assertEquals(chosenValue, settingValue);
-        } else {
-            assertNull(settingValue);
-            assertNotNull(clusterSettingsResponse.evaluate("persistent.archived."
+ THRESHOLD_SETTING.getKey())); - } - } } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index f3fd57f3fc8ae..f6549a2d83fe6 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -10,7 +10,6 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -64,8 +63,7 @@ excludeList.add('indices.resolve_index/20_resolve_system_index/*') // Excluded because the error has changed excludeList.add('aggregations/percentiles_hdr_metric/Negative values test') -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> - +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> if (bwcVersion != VersionProperties.getElasticsearchVersion()) { /* This project runs the core REST tests against a 4 node cluster where two of the nodes has a different minor. */ diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 146acedd164b2..906a49134bb51 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -35,7 +35,7 @@ def ccsSupportedVersion = bwcVersion -> { return currentVersion.minor == 0 || (currentVersion.major == bwcVersion.major && currentVersion.minor - bwcVersion.minor <= 1) } -BuildParams.bwcVersions.withWireCompatible(ccsSupportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(ccsSupportedVersion) { bwcVersion, baseName -> def remoteCluster = testClusters.register("${baseName}-remote") { numberOfNodes = 2 diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index 17888efaa2b49..79a8be4c1be24 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -16,7 +16,7 @@ apply plugin: 'elasticsearch.internal-test-artifact' apply plugin: 'elasticsearch.bwc-test' -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> String oldClusterName = "${baseName}-old" String newClusterName = "${baseName}-new" diff --git a/qa/rolling-upgrade-legacy/build.gradle b/qa/rolling-upgrade-legacy/build.gradle index 4ebb3888e9f20..e1c31fd50c0d4 100644 --- a/qa/rolling-upgrade-legacy/build.gradle +++ b/qa/rolling-upgrade-legacy/build.gradle @@ -10,7 +10,6 @@ import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -18,7 +17,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' apply plugin: 'elasticsearch.rest-resources' -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> /* * NOTE: This module is for the tests that were problematic when converting :qa:rolling-upgrade to the junit-based bwc test definition * Over time, these should be migrated into the :qa:rolling-upgrade module and fixed properly diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index ef31f6421c187..2f717f201f248 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -18,7 +18,7 @@ testArtifacts { 
registerTestArtifactFromSourceSet(sourceSets.javaRestTest) } -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index af4e55a709a64..c707c2b5e8c80 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -8,7 +8,6 @@ */ import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.legacy-yaml-rest-test' diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index f74ee7c59b26b..ee29da53dc51b 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -19,7 +19,7 @@ dependencies { testImplementation project(':modules:rest-root') } -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> def baseCluster = testClusters.register(baseName) { version = bwcVersion.toString() setting 'xpack.security.enabled', 'true' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.delete.json index 745136848786c..cb4eee007a246 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.delete.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html", "description": "Delete an inference endpoint" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.get.json index 7b7aa0f56fcbc..14e7519c3796e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.get.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html", "description":"Get an inference endpoint" }, - "stability":"experimental", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json index 3195476ce1e9e..eb4c1268c28ca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html", "description":"Perform inference" }, - "stability":"experimental", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put.json index 9ff5ff4b80c58..411392fe39908 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put.json +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html", "description":"Configure an inference endpoint for use in the Inference API" }, - "stability":"experimental", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.stream_inference.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.stream_inference.json index 32b4b2f311837..493306e10d5c7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.stream_inference.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.stream_inference.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/post-stream-inference-api.html", "description":"Perform streaming inference" }, - "stability":"experimental", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "text/event-stream"], diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml index 2079c01079ce1..c47df413df9e7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml @@ -1,8 +1,8 @@ setup: - - skip: + - requires: cluster_features: [ "gte_v7.8.0" ] reason: "index template v2 API unavailable before 7.8" - features: allowed_warnings + test_runner_features: allowed_warnings - do: allowed_warnings: @@ -92,10 +92,9 @@ setup: --- "Add data stream lifecycle": - - skip: + - requires: cluster_features: ["gte_v8.11.0"] reason: "Data stream lifecycle in index templates was updated after 8.10" - features: allowed_warnings - do: allowed_warnings: @@ -127,10 +126,9 @@ setup: --- "Get data stream lifecycle with default rollover": - - skip: + - requires: cluster_features: ["gte_v8.11.0"] reason: "Data stream lifecycle in index templates was updated after 8.10" - features: allowed_warnings - do: allowed_warnings: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/16_creation_date_tier_preference.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/16_creation_date_tier_preference.yml new file mode 100644 index 0000000000000..6ecd9c3e9c2ce --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/16_creation_date_tier_preference.yml @@ -0,0 +1,14 @@ +--- +"Ensure index creation date and tier preference are exposed": + - requires: + cluster_features: ["stats.tier_creation_date"] + reason: index creation date and tier preference added to stats in 8.17 + + - do: + indices.create: + index: myindex + - do: + indices.stats: {} + + - is_true: indices.myindex.creation_date + - is_true: indices.myindex.tier_preference diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml index db718959919da..54b2bf59c8ddc 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml @@ -129,7 
+129,7 @@ noop update: {} --- -update: +regular update: - requires: cluster_features: ["gte_v8.2.0"] reason: tsdb indexing changed in 8.2.0 diff --git a/server/build.gradle b/server/build.gradle index ef64b0746dfc4..bc8decfa8babc 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -133,7 +133,7 @@ def generatePluginsList = tasks.register("generatePluginsList") { sourceSets.main.output.dir(generatedResourcesDir) sourceSets.main.compiledBy(generateModulesList, generatePluginsList) -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' systemProperty 'es.failure_store_feature_flag_enabled', 'true' diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java index 34170d7c0f747..e45555b1dec19 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java @@ -99,7 +99,11 @@ public void testBulkWithWriteIndexAndRouting() { // allowing the auto-generated timestamp to externally be set would allow making the index inconsistent with duplicate docs public void testExternallySetAutoGeneratedTimestamp() { IndexRequest indexRequest = new IndexRequest("index1").source(Collections.singletonMap("foo", "baz")); - indexRequest.autoGenerateId(); + if (randomBoolean()) { + indexRequest.autoGenerateId(); + } else { + indexRequest.autoGenerateTimeBasedId(); + } if (randomBoolean()) { indexRequest.id("test"); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java index bfe46dc4c90f2..b3ec4a5331180 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java @@ -9,8 +9,13 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils; +import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.ClusterInfoServiceUtils; +import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.telemetry.TestTelemetryPlugin; @@ -53,11 +58,19 @@ public void testDesiredBalanceGaugeMetricsAreOnlyPublishedByCurrentMaster() thro } } - public void testDesiredBalanceNodeWeightMetrics() { + public void testDesiredBalanceMetrics() { internalCluster().startNodes(2); prepareCreate("test").setSettings(indexSettings(2, 1)).get(); - indexRandom(randomBoolean(), "test", between(50, 100)); ensureGreen(); + + indexRandom(randomBoolean(), "test", between(50, 100)); + flush("test"); + // Make sure new cluster info is available + final var infoService = (InternalClusterInfoService) 
internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); + ClusterInfoServiceUtils.setUpdateFrequency(infoService, TimeValue.timeValueMillis(200)); + assertNotNull("info should not be null", ClusterInfoServiceUtils.refresh(infoService)); + ClusterRerouteUtils.reroute(client()); // ensure we leverage the latest cluster info + final var telemetryPlugin = getTelemetryPlugin(internalCluster().getMasterName()); telemetryPlugin.collect(); assertThat(telemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.UNASSIGNED_SHARDS_METRIC_NAME), not(empty())); @@ -68,42 +81,89 @@ public void testDesiredBalanceNodeWeightMetrics() { var nodeIds = internalCluster().clusterService().state().nodes().stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); var nodeNames = internalCluster().clusterService().state().nodes().stream().map(DiscoveryNode::getName).collect(Collectors.toSet()); - final var nodeWeightsMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var desiredBalanceNodeWeightsMetrics = telemetryPlugin.getDoubleGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME ); - assertThat(nodeWeightsMetrics.size(), equalTo(2)); - for (var nodeStat : nodeWeightsMetrics) { - assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat(desiredBalanceNodeWeightsMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeWeightsMetrics) { + assertTrue(nodeStat.isDouble()); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var nodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + final var desiredBalanceNodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME ); - assertThat(nodeShardCountMetrics.size(), equalTo(2)); - for (var nodeStat : nodeShardCountMetrics) { + assertThat(desiredBalanceNodeShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeShardCountMetrics) { assertThat(nodeStat.value().longValue(), equalTo(2L)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var nodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var desiredBalanceNodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WRITE_LOAD_METRIC_NAME ); - assertThat(nodeWriteLoadMetrics.size(), equalTo(2)); - for (var nodeStat : nodeWriteLoadMetrics) { + assertThat(desiredBalanceNodeWriteLoadMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeWriteLoadMetrics) { assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var nodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var desiredBalanceNodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME ); - assertThat(nodeDiskUsageMetrics.size(), equalTo(2)); - for (var nodeStat : nodeDiskUsageMetrics) { + assertThat(desiredBalanceNodeDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeDiskUsageMetrics) { assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); 
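+            // each measurement is tagged with the node it describes; the node_id/node_name
+            // attributes are checked below against the node set captured from the cluster state above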
assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } + final var currentNodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_SHARD_COUNT_METRIC_NAME + ); + assertThat(currentNodeShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeShardCountMetrics) { + assertThat(nodeStat.value().longValue(), equalTo(2L)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_WRITE_LOAD_METRIC_NAME + ); + assertThat(currentNodeWriteLoadMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeWriteLoadMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeDiskUsageMetrics = telemetryPlugin.getLongGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME + ); + assertThat(currentNodeDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeDiskUsageMetrics) { + assertThat(nodeStat.value().longValue(), greaterThanOrEqualTo(0L)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + assertTrue(currentNodeDiskUsageMetrics.stream().anyMatch(m -> m.getLong() > 0L)); + final var currentNodeUndesiredShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME + ); + assertThat(currentNodeUndesiredShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeUndesiredShardCountMetrics) { + assertThat(nodeStat.value().longValue(), greaterThanOrEqualTo(0L)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeForecastedDiskUsageMetrics = telemetryPlugin.getLongGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME + ); + assertThat(currentNodeForecastedDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeForecastedDiskUsageMetrics) { + assertThat(nodeStat.value().longValue(), greaterThanOrEqualTo(0L)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + assertTrue(currentNodeForecastedDiskUsageMetrics.stream().anyMatch(m -> m.getLong() > 0L)); } private static void assertOnlyMasterIsPublishingMetrics() { @@ -136,6 +196,17 @@ private static void assertMetricsAreBeingPublished(String nodeName, boolean shou testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME), matcher ); + assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_WRITE_LOAD_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME), matcher); + 
assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_SHARD_COUNT_METRIC_NAME), matcher); + assertThat( + testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME), + matcher + ); + assertThat( + testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME), + matcher + ); } private static TestTelemetryPlugin getTelemetryPlugin(String nodeName) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index f51dd87e8eeff..f41277c5b80ca 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -287,7 +287,7 @@ public void testWildcardBehaviour() throws Exception { verify(indicesStats(indices), false); verify(forceMerge(indices), false); verify(refreshBuilder(indices), false); - verify(validateQuery(indices), true); + verify(validateQuery(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); @@ -338,7 +338,7 @@ public void testWildcardBehaviour() throws Exception { verify(indicesStats(indices), false); verify(forceMerge(indices), false); verify(refreshBuilder(indices), false); - verify(validateQuery(indices), true); + verify(validateQuery(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 039a596f53b38..38eef4f720623 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -19,14 +19,19 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; +import org.elasticsearch.indices.recovery.RecoveryFilesInfoRequest; import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportService; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -34,6 +39,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -72,16 +78,14 @@ public void testCancelRecoveryAndResume() throws Exception { // we use 2 nodes a lucky and unlucky one // the lucky one holds the primary // the unlucky one gets the replica and 
the truncated leftovers
-        NodeStats primariesNode = dataNodeStats.get(0);
-        NodeStats unluckyNode = dataNodeStats.get(1);
+        String primariesNode = dataNodeStats.get(0).getNode().getName();
+        String unluckyNode = dataNodeStats.get(1).getNode().getName();

         // create the index and prevent allocation on any other nodes than the lucky one
         // we have no replicas so far and make sure that we allocate the primary on the lucky node
         assertAcked(
             prepareCreate("test").setMapping("field1", "type=text", "the_id", "type=text")
-                .setSettings(
-                    indexSettings(numberOfShards(), 0).put("index.routing.allocation.include._name", primariesNode.getNode().getName())
-                )
+                .setSettings(indexSettings(numberOfShards(), 0).put("index.routing.allocation.include._name", primariesNode))
         ); // only allocate on the lucky node
         // index some docs and check if they are coming back
@@ -102,20 +106,54 @@ public void testCancelRecoveryAndResume() throws Exception {
         indicesAdmin().prepareFlush().setForce(true).get();
         // double flush to create safe commit in case of async durability
         indicesAdmin().prepareForceMerge().setMaxNumSegments(1).setFlush(true).get();
+        // We write some garbage into the shard directory so that we can verify that it is cleaned up before we resend.
+        // Cleanup helps prevent recovery from failing due to lack of space from garbage left over from a previous
+        // recovery that crashed during file transmission. #104473
+        // We can't look for the presence of the recovery temp files themselves because they are automatically
+        // cleaned up on clean shutdown by MultiFileWriter.
+        final String GARBAGE_PREFIX = "recovery.garbage.";
+
         final CountDownLatch latch = new CountDownLatch(1);
         final AtomicBoolean truncate = new AtomicBoolean(true);
+
+        IndicesService unluckyIndices = internalCluster().getInstance(IndicesService.class, unluckyNode);
+        Function<ShardId, Path> getUnluckyIndexPath = (shardId) -> unluckyIndices.indexService(shardId.getIndex())
+            .getShard(shardId.getId())
+            .shardPath()
+            .resolveIndex();
+
         for (NodeStats dataNode : dataNodeStats) {
             MockTransportService.getInstance(dataNode.getNode().getName())
                 .addSendBehavior(
-                    internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
+                    internalCluster().getInstance(TransportService.class, unluckyNode),
                     (connection, requestId, action, request, options) -> {
                         if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
                             RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                             logger.info("file chunk [{}] lastChunk: {}", req, req.lastChunk());
+                            // During the first recovery attempt (when truncate is set), write an extra garbage file once for each
+                            // file transmitted. We get multiple chunks per file but only one is the last.
+                            if (truncate.get() && req.lastChunk()) {
+                                final var shardPath = getUnluckyIndexPath.apply(req.shardId());
+                                final var garbagePath = Files.createTempFile(shardPath, GARBAGE_PREFIX, null);
+                                logger.info("writing garbage at: {}", garbagePath);
+                            }
                             if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) {
                                 latch.countDown();
                                 throw new RuntimeException("Caused some truncated files for fun and profit");
                             }
+                        } else if (action.equals(PeerRecoveryTargetService.Actions.FILES_INFO)) {
+                            // verify there are no garbage files present at the FILES_INFO stage of recovery. This precedes FILE_CHUNK
+                            // and so will run before garbage has been introduced on the first attempt, and before post-transfer cleanup
+                            // has been performed on the second.
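+                            // Assumed sequencing, sketched here for orientation (derived from the handlers above,
+                            // not an extra assertion): attempt one sees FILES_INFO with a clean directory, plants
+                            // garbage during FILE_CHUNK and then fails the chunk; attempt two must observe that the
+                            // planted garbage was removed again before any file bytes are re-sent.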
+ final var shardPath = getUnluckyIndexPath.apply(((RecoveryFilesInfoRequest) request).shardId()); + try (var list = Files.list(shardPath).filter(path -> path.getFileName().startsWith(GARBAGE_PREFIX))) { + final var garbageFiles = list.toArray(); + assertArrayEquals( + "garbage files should have been cleaned before file transmission", + new Path[0], + garbageFiles + ); + } } connection.sendRequest(requestId, action, request, options); } @@ -128,14 +166,14 @@ public void testCancelRecoveryAndResume() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put( "index.routing.allocation.include._name", // now allow allocation on all nodes - primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName() + primariesNode + "," + unluckyNode ), "test" ); latch.await(); - // at this point we got some truncated left overs on the replica on the unlucky node + // at this point we got some truncated leftovers on the replica on the unlucky node // now we are allowing the recovery to allocate again and finish to see if we wipe the truncated files truncate.compareAndSet(true, false); ensureGreen("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 0d06856ca1088..4799b4bec0c8d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -64,6 +64,8 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw } indexRandom(true, indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()])); assertHitCount(prepareSearch(), (numDocs)); + // hold a copy of the node names before a new node is potentially added later + String[] nodeNamesBeforeClusterResize = internalCluster().getNodeNames(); final int numIters = scaledRandomIntBetween(5, 20); for (int i = 0; i < numIters; i++) { final AtomicBoolean stop = new AtomicBoolean(false); @@ -76,34 +78,37 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw public void run() { try { while (stop.get() == false) { - assertResponse(prepareSearch().setSize(numDocs), response -> { - if (response.getHits().getTotalHits().value() != numDocs) { - // if we did not search all shards but had no serious failures that is potentially fine - // if only the hit-count is wrong. this can happen if the cluster-state is behind when the - // request comes in. It's a small window but a known limitation. - if (response.getTotalShards() != response.getSuccessfulShards() - && Stream.of(response.getShardFailures()) - .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) { - nonCriticalExceptions.add( - "Count is " - + response.getHits().getTotalHits().value() - + " but " - + numDocs - + " was expected. " - + formatShardStatus(response) - ); - } else { - assertHitCount(response, numDocs); + assertResponse( + client(randomFrom(nodeNamesBeforeClusterResize)).prepareSearch().setSize(numDocs), + response -> { + if (response.getHits().getTotalHits().value() != numDocs) { + // if we did not search all shards but had no serious failures that is potentially fine + // if only the hit-count is wrong. this can happen if the cluster-state is behind when the + // request comes in. It's a small window but a known limitation. 
+ if (response.getTotalShards() != response.getSuccessfulShards() + && Stream.of(response.getShardFailures()) + .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) { + nonCriticalExceptions.add( + "Count is " + + response.getHits().getTotalHits().value() + + " but " + + numDocs + + " was expected. " + + formatShardStatus(response) + ); + } else { + assertHitCount(response, numDocs); + } } - } - final SearchHits sh = response.getHits(); - assertThat( - "Expected hits to be the same size the actual hits array", - sh.getTotalHits().value(), - equalTo((long) (sh.getHits().length)) - ); - }); + final SearchHits sh = response.getHits(); + assertThat( + "Expected hits to be the same size the actual hits array", + sh.getTotalHits().value(), + equalTo((long) (sh.getHits().length)) + ); + } + ); // this is the more critical but that we hit the actual hit array has a different size than the // actual number of hits. } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 2fde645f0036b..4688201c66201 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -21,7 +21,9 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.NestedSortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortMode; @@ -1581,6 +1583,64 @@ public void testCheckFixedBitSetCache() throws Exception { assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0L)); } + public void testSkipNestedInnerHits() throws Exception { + assertAcked(prepareCreate("test").setMapping("nested1", "type=nested")); + ensureGreen(); + + prepareIndex("test").setId("1") + .setSource( + jsonBuilder().startObject() + .field("field1", "value1") + .startArray("nested1") + .startObject() + .field("n_field1", "foo") + .field("n_field2", "bar") + .endObject() + .endArray() + .endObject() + ) + .get(); + + waitForRelocation(ClusterHealthStatus.GREEN); + GetResponse getResponse = client().prepareGet("test", "1").get(); + assertThat(getResponse.isExists(), equalTo(true)); + assertThat(getResponse.getSourceAsBytesRef(), notNullValue()); + refresh(); + + assertNoFailuresAndResponse( + prepareSearch("test").setSource( + new SearchSourceBuilder().query( + QueryBuilders.nestedQuery("nested1", QueryBuilders.termQuery("nested1.n_field1", "foo"), ScoreMode.Avg) + .innerHit(new InnerHitBuilder()) + ) + ), + res -> { + assertNotNull(res.getHits()); + assertHitCount(res, 1); + assertThat(res.getHits().getHits().length, equalTo(1)); + // by default we should get inner hits + assertNotNull(res.getHits().getHits()[0].getInnerHits()); + assertNotNull(res.getHits().getHits()[0].getInnerHits().get("nested1")); + } + ); + + assertNoFailuresAndResponse( + prepareSearch("test").setSource( + new SearchSourceBuilder().query( + QueryBuilders.nestedQuery("nested1", QueryBuilders.termQuery("nested1.n_field1", "foo"), ScoreMode.Avg) + .innerHit(new InnerHitBuilder()) + ).skipInnerHits(true) + ), + 
res -> { + assertNotNull(res.getHits()); + assertHitCount(res, 1); + assertThat(res.getHits().getHits().length, equalTo(1)); + // if we explicitly say to ignore inner hits, then this should now be null + assertNull(res.getHits().getHits()[0].getInnerHits()); + } + ); + } + private void assertDocumentCount(String index, long numdocs) { IndicesStatsResponse stats = indicesAdmin().prepareStats(index).clear().setDocs(true).get(); assertNoFailures(stats); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index 0fd2bd6f94770..3f6f7af56eb08 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -57,6 +57,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -772,6 +773,7 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException ); // counter example assertHitCount( + 0L, prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( @@ -779,19 +781,13 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException ).operator(Operator.AND) ) ), - 0L - ); - - // counter example - assertHitCount( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( randomBoolean() ? MultiMatchQueryBuilder.Type.CROSS_FIELDS : MultiMatchQueryBuilder.DEFAULT_TYPE ).operator(Operator.AND) ) - ), - 0L + ) ); // test if boosts work @@ -828,40 +824,21 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException } ); // Test group based on numeric fields - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + }, prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - assertResponse( prepareSearch("test").setQuery( randomizeType(multiMatchQuery("15", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - // Two numeric fields together caused trouble at one point! - assertResponse( + // Two numeric fields together caused trouble at one point! 
prepareSearch("test").setQuery( randomizeType(multiMatchQuery("15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - assertResponse( prepareSearch("test").setQuery( randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } + ) ); assertResponse( prepareSearch("test").setQuery( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java index cffba49d5941c..118aa00fc1b4f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -108,6 +108,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; @@ -216,21 +217,14 @@ public void testConstantScoreQuery() throws Exception { assertThat(searchHit, hasScore(1.0f)); } }); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + }, + prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())), prepareSearch("test").setQuery( boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())) - ), - response -> { - assertHitCount(response, 2L); - assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); - } - ); - assertResponse( - prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())), - response -> { - assertHitCount(response, 2L); - assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); - } + ) ); assertResponse( prepareSearch("test").setQuery( @@ -800,20 +794,18 @@ public void testSpecialRangeSyntaxInQueryString() { prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - assertResponse(prepareSearch().setQuery(queryStringQuery("num:>19")), response -> { + assertResponses(response -> { assertHitCount(response, 1L); assertFirstHit(response, hasId("2")); - }); - assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>20")), 0L); + }, prepareSearch().setQuery(queryStringQuery("num:>19")), prepareSearch().setQuery(queryStringQuery("num:>=20"))); - assertResponse(prepareSearch().setQuery(queryStringQuery("num:>=20")), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("2")); - }); - assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>11")), 2L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<20")), 
1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<=20")), 2L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("+num:>11 +num:<20")), 1L); + assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>20")), 0L); + assertHitCount(2L, prepareSearch().setQuery(queryStringQuery("num:>11")), prepareSearch().setQuery(queryStringQuery("num:<=20"))); + assertHitCount( + 1L, + prepareSearch().setQuery(queryStringQuery("num:<20")), + prepareSearch().setQuery(queryStringQuery("+num:>11 +num:<20")) + ); } public void testEmptytermsQuery() throws Exception { @@ -826,8 +818,11 @@ public void testEmptytermsQuery() throws Exception { prepareIndex("test").setId("3").setSource("term", "3"), prepareIndex("test").setId("4").setSource("term", "4") ); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("term", new String[0]))), 0L); - assertHitCount(prepareSearch("test").setQuery(idsQuery()), 0L); + assertHitCount( + 0L, + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("term", new String[0]))), + prepareSearch("test").setQuery(idsQuery()) + ); } public void testTermsQuery() throws Exception { @@ -866,9 +861,12 @@ public void testTermsQuery() throws Exception { assertSearchHitsWithoutFailures(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("dbl", new double[] { 2, 5 }))), "2"); assertSearchHitsWithoutFailures(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("lng", new long[] { 2, 5 }))), "2"); // test valid type, but no matching terms - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("str", "5", "6"))), 0L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("dbl", new double[] { 5, 6 }))), 0L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("lng", new long[] { 5, 6 }))), 0L); + assertHitCount( + 0L, + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("str", "5", "6"))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("dbl", new double[] { 5, 6 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("lng", new long[] { 5, 6 }))) + ); } public void testTermsLookupFilter() throws Exception { @@ -1064,106 +1062,35 @@ public void testNumericTermsAndRanges() throws Exception { .get(); refresh(); - logger.info("--> term query on 1"); - assertResponse(prepareSearch("test").setQuery(termQuery("num_byte", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_short", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_integer", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_long", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_float", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_double", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - logger.info("--> terms query on 1"); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })), response -> { - assertHitCount(response, 1L); - 
assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - logger.info("--> term filter on 1"); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))), response -> { + assertResponses(response -> { assertHitCount(response, 1L); assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - logger.info("--> terms filter on 1"); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); + }, + prepareSearch("test").setQuery(termQuery("num_byte", 1)), + prepareSearch("test").setQuery(termQuery("num_short", 1)), + 
prepareSearch("test").setQuery(termQuery("num_integer", 1)), + prepareSearch("test").setQuery(termQuery("num_long", 1)), + prepareSearch("test").setQuery(termQuery("num_float", 1)), + prepareSearch("test").setQuery(termQuery("num_double", 1)), + prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))) + ); } public void testNumericRangeFilter_2826() throws Exception { @@ -1301,16 +1228,19 @@ public void testSpanMultiTermQuery() throws IOException { prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get(); refresh(); - assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))), 4); - assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(prefixQuery("description", "fo")))), 4); - assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(wildcardQuery("description", "oth*")))), 3); assertHitCount( + 4, + prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))), + prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(prefixQuery("description", "fo")))) + ); + assertHitCount( + 3, + prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(wildcardQuery("description", "oth*")))), prepareSearch("test").setQuery( spanOrQuery(spanMultiTermQueryBuilder(QueryBuilders.rangeQuery("description").from("ffa").to("foo"))) ), - 3 + prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(regexpQuery("description", "fo{2}")))) ); - assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(regexpQuery("description", "fo{2}")))), 3); } public void testSpanNot() throws IOException, ExecutionException, InterruptedException { @@ -1321,6 +1251,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc refresh(); assertHitCount( + 1L, prepareSearch("test").setQuery( spanNotQuery( spanNearQuery(QueryBuilders.spanTermQuery("description", "quick"), 1).addClause( @@ -1329,9 +1260,6 @@ public void 
testSpanNot() throws IOException, ExecutionException, InterruptedExc spanTermQuery("description", "brown") ) ), - 1L - ); - assertHitCount( prepareSearch("test").setQuery( spanNotQuery( spanNearQuery(QueryBuilders.spanTermQuery("description", "quick"), 1).addClause( @@ -1340,9 +1268,6 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc spanTermQuery("description", "sleeping") ).dist(5) ), - 1L - ); - assertHitCount( prepareSearch("test").setQuery( spanNotQuery( spanNearQuery(QueryBuilders.spanTermQuery("description", "quick"), 1).addClause( @@ -1350,8 +1275,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc ), spanTermQuery("description", "jumped") ).pre(1).post(1) - ), - 1L + ) ); } @@ -1423,22 +1347,19 @@ public void testSimpleDFSQuery() throws IOException { public void testMultiFieldQueryString() { prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - - logger.info("regular"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:value1")), 1); - logger.info("prefix"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("value*").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:value*")), 1); - logger.info("wildcard"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("v?lue*").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:v?lue*")), 1); - logger.info("fuzzy"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("value~").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:value~")), 1); - logger.info("regexp"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("/value[01]/").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:/value[01]/")), 1); + assertHitCount( + 1, + prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:value1")), + prepareSearch("test").setQuery(queryStringQuery("value*").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:value*")), + prepareSearch("test").setQuery(queryStringQuery("v?lue*").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:v?lue*")), + prepareSearch("test").setQuery(queryStringQuery("value~").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:value~")), + prepareSearch("test").setQuery(queryStringQuery("/value[01]/").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:/value[01]/")) + ); } // see #3797 @@ -1448,9 +1369,12 @@ public void testMultiMatchLenientIssue3797() { prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get(); refresh(); - assertHitCount(prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), 1L); - assertHitCount(prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), 1L); - assertHitCount(prepareSearch("test").setQuery(multiMatchQuery("value2").field("field2", 2).lenient(true)), 1L); 
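// The consolidation running through these hunks relies on varargs overloads in
// ElasticsearchAssertions, which let a single expectation be checked against several
// request builders at once. A minimal sketch of the shape of such a helper (a hedged
// simplification, not the actual implementation, which also closes responses and adds
// failure context):
//
//     public static void assertHitCount(long expectedHitCount, SearchRequestBuilder... requests) {
//         for (SearchRequestBuilder request : requests) {
//             assertResponse(request, response -> assertHitCount(response, expectedHitCount));
//         }
//     }
//
// assertResponses(consumer, builders...) follows the same pattern with an arbitrary
// response consumer in place of the fixed hit-count check.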
+ assertHitCount( + 1L, + prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), + prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), + prepareSearch("test").setQuery(multiMatchQuery("value2").field("field2", 2).lenient(true)) + ); } public void testMinScore() throws ExecutionException, InterruptedException { @@ -1483,24 +1407,15 @@ public void testQueryStringWithSlopAndFields() { assertHitCount(prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")), 2); assertHitCount( + 1, prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "customer")) .setQuery(QueryBuilders.queryStringQuery("\"one two\"").field("desc")), - 1 - ); - assertHitCount( prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "product")) .setQuery(QueryBuilders.queryStringQuery("\"one three\"~5").field("desc")), - 1 - ); - assertHitCount( prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "customer")) .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")), - 1 - ); - assertHitCount( prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "customer")) - .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")), - 1 + .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")) ); } @@ -1602,23 +1517,16 @@ public void testRangeQueryWithTimeZone() throws Exception { assertThat(response.getHits().getAt(0).getId(), is("2")); } ); - assertResponse( - prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00") - ), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("3")); - } - ); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + }, prepareSearch("test").setQuery( QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00") ), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("3")); - } + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00") + ) ); assertResponse(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00")), response -> { assertHitCount(response, 1L); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java index 9a7ce2c5c28ab..c59c4a045a36b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java @@ -33,6 +33,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -68,25 +69,20 @@ public void 
testStopOneNodePreferenceWithRedState() throws IOException { "_prefer_nodes:somenode,server2" }; for (String pref : preferences) { logger.info("--> Testing out preference={}", pref); - assertResponse(prepareSearch().setSize(0).setPreference(pref), response -> { + assertResponses(response -> { assertThat(RestStatus.OK, equalTo(response.status())); assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0)); - }); - assertResponse(prepareSearch().setPreference(pref), response -> { - assertThat(RestStatus.OK, equalTo(response.status())); - assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0)); - }); + }, prepareSearch().setSize(0).setPreference(pref), prepareSearch().setPreference(pref)); } // _only_local is a stricter preference, we need to send the request to a data node - assertResponse(dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local"), response -> { + assertResponses(response -> { assertThat(RestStatus.OK, equalTo(response.status())); assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0)); - }); - assertResponse(dataNodeClient().prepareSearch().setPreference("_only_local"), response -> { - assertThat(RestStatus.OK, equalTo(response.status())); - assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0)); - }); + }, + dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local"), + dataNodeClient().prepareSearch().setPreference("_only_local") + ); } public void testNoPreferenceRandom() { @@ -121,19 +117,11 @@ public void testSimplePreference() { prepareIndex("test").setSource("field1", "value1").get(); refresh(); - assertResponse( + assertResponses( + response -> assertThat(response.getHits().getTotalHits().value(), equalTo(1L)), prepareSearch().setQuery(matchAllQuery()), - response -> assertThat(response.getHits().getTotalHits().value(), equalTo(1L)) - ); - - assertResponse( prepareSearch().setQuery(matchAllQuery()).setPreference("_local"), - response -> assertThat(response.getHits().getTotalHits().value(), equalTo(1L)) - ); - - assertResponse( - prepareSearch().setQuery(matchAllQuery()).setPreference("1234"), - response -> assertThat(response.getHits().getTotalHits().value(), equalTo(1L)) + prepareSearch().setQuery(matchAllQuery()).setPreference("1234") ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 0ceef5d3c70f1..8b21bb54361b6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -61,6 +61,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -159,21 +160,13 @@ public void testTextAndGlobalText() throws Exception { } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder noText = 
SuggestBuilders.completionSuggestion(FIELD); - assertResponse( - prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", noText).setGlobalText("sugg")), - response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") - ); - CompletionSuggestionBuilder withText = SuggestBuilders.completionSuggestion(FIELD).text("sugg"); - assertResponse( + assertResponses( + response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6"), + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", noText).setGlobalText("sugg")), prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText)), - response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") - ); - - // test that suggestion text takes precedence over global text - assertResponse( - prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText).setGlobalText("bogus")), - response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") + // test that suggestion text takes precedence over global text + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText).setGlobalText("bogus")) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 37d2f4e1a9387..388421b6dd53f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -9,16 +9,18 @@ package org.elasticsearch.validate; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -207,12 +209,8 @@ public void testExplainDateRangeInQueryString() { } public void testValidateEmptyCluster() { - try { - indicesAdmin().prepareValidateQuery().get(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index [_all] and no indices exist")); - } + ValidateQueryResponse response = indicesAdmin().prepareValidateQuery().get(); + assertThat(response.getTotalShards(), is(0)); } public void testExplainNoQuery() { @@ -379,4 +377,52 @@ public void testExplainTermsQueryWithLookup() { ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("twitter").setQuery(termsLookupQuery).setExplain(true).get(); assertThat(response.isValid(), 
is(true)); } + + public void testOneClosedIndex() { + createIndex("test"); + + boolean ignoreUnavailable = false; + IndicesOptions options = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false, true, true, false, false); + client().admin().indices().close(new CloseIndexRequest("test")).actionGet(); + IndexClosedException ex = expectThrows( + IndexClosedException.class, + indicesAdmin().prepareValidateQuery("test").setIndicesOptions(options) + ); + assertEquals("closed", ex.getMessage()); + } + + public void testOneClosedIndexIgnoreUnavailable() { + createIndex("test"); + + boolean ignoreUnavailable = true; + IndicesOptions options = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false, true, true, false, false); + client().admin().indices().close(new CloseIndexRequest("test")).actionGet(); + ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("test").setIndicesOptions(options).get(); + assertThat(response.getTotalShards(), is(0)); + } + + public void testTwoIndicesOneClosed() { + createIndex("test1"); + createIndex("test2"); + + boolean ignoreUnavailable = false; + IndicesOptions options = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false, true, true, false, false); + client().admin().indices().close(new CloseIndexRequest("test1")).actionGet(); + IndexClosedException ex = expectThrows( + IndexClosedException.class, + indicesAdmin().prepareValidateQuery("test1", "test2").setIndicesOptions(options) + ); + assertEquals("closed", ex.getMessage()); + } + + public void testTwoIndicesOneClosedIgnoreUnavailable() { + createIndex("test1"); + createIndex("test2"); + + boolean ignoreUnavailable = true; + IndicesOptions options = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false, true, true, false, false); + client().admin().indices().close(new CloseIndexRequest("test1")).actionGet(); + ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("test1", "test2").setIndicesOptions(options).get(); + assertThat(response.getTotalShards(), is(1)); + } } diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 3b3b06a1c6924..29c869a9f8d77 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -419,6 +419,7 @@ provides org.elasticsearch.features.FeatureSpecification with + org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures, org.elasticsearch.action.bulk.BulkFeatures, org.elasticsearch.features.FeatureInfrastructureFeatures, org.elasticsearch.health.HealthFeatures, diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 5f3b466f9f7bd..5b5d12d738194 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -104,6 +104,7 @@ static TransportVersion def(int id) { public static final TransportVersion V_8_14_0 = def(8_636_00_1); public static final TransportVersion V_8_15_0 = def(8_702_00_2); public static final TransportVersion V_8_15_2 = def(8_702_00_3); + public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15 = def(8_702_00_4); public static final TransportVersion ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS = def(8_703_00_0); public static final TransportVersion INFERENCE_ADAPTIVE_ALLOCATIONS = def(8_704_00_0); public static final TransportVersion INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN = def(8_705_00_0); @@ -176,6 +177,8 @@ static 
TransportVersion def(int id) {
     public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0);
     public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1);
     public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16 = def(8_772_00_2);
+    public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16 = def(8_772_00_3);
+    public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16 = def(8_772_00_4);
     public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0);
     public static final TransportVersion REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0);
     public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0);
@@ -193,6 +196,10 @@ static TransportVersion def(int id) {
     public static final TransportVersion ROLE_MONITOR_STATS = def(8_787_00_0);
     public static final TransportVersion DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK = def(8_788_00_0);
     public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_789_00_0);
+    public static final TransportVersion VERTEX_AI_INPUT_TYPE_ADDED = def(8_790_00_0);
+    public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE = def(8_791_00_0);
+    public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES = def(8_792_00_0);
+    public static final TransportVersion INDEX_STATS_ADDITIONAL_FIELDS = def(8_793_00_0);
 
     /*
      * STOP! READ THIS FIRST! No, really,
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java
index 5e4df05c10182..7791ca200a785 100644
--- a/server/src/main/java/org/elasticsearch/Version.java
+++ b/server/src/main/java/org/elasticsearch/Version.java
@@ -188,6 +188,7 @@ public class Version implements VersionId<Version>, ToXContentFragment {
     public static final Version V_8_15_3 = new Version(8_15_03_99);
     public static final Version V_8_15_4 = new Version(8_15_04_99);
     public static final Version V_8_16_0 = new Version(8_16_00_99);
+    public static final Version V_8_16_1 = new Version(8_16_01_99);
     public static final Version V_8_17_0 = new Version(8_17_00_99);
     public static final Version V_9_0_0 = new Version(9_00_00_99);
     public static final Version CURRENT = V_9_0_0;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
index 6106e620521f7..7cefc086e17dc 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.core.Nullable;
+import org.elasticsearch.features.NodeFeature;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -21,6 +22,8 @@ public class IndexStats implements Iterable<IndexShardStats> {
 
+    public static final NodeFeature TIER_CREATION_DATE = new NodeFeature("stats.tier_creation_date");
+
     private final String index;
 
     private final String uuid;
@@ -29,6 +32,10 @@ public class IndexStats implements Iterable<IndexShardStats> {
 
     private final IndexMetadata.State state;
 
+    private final List<String> tierPreference;
+
+    private final Long creationDate;
+
     private final ShardStats[] shards;
 
     public IndexStats(
@@ -36,12 +43,16 @@ public IndexStats(
         String uuid,
         @Nullable ClusterHealthStatus health,
         @Nullable IndexMetadata.State state,
+        @Nullable List<String> tierPreference,
+        @Nullable Long creationDate,
         ShardStats[] shards
     ) {
         this.index = index;
         this.uuid = uuid;
         this.health = health;
         this.state = state;
+        this.tierPreference = tierPreference;
+        this.creationDate = creationDate;
         this.shards = shards;
     }
@@ -61,6 +72,14 @@ public IndexMetadata.State getState() {
         return state;
     }
 
+    public List<String> getTierPreference() {
+        return tierPreference;
+    }
+
+    public Long getCreationDate() {
+        return creationDate;
+    }
+
     public ShardStats[] getShards() {
         return this.shards;
     }
@@ -129,13 +148,24 @@ public static class IndexStatsBuilder {
         private final String uuid;
         private final ClusterHealthStatus health;
         private final IndexMetadata.State state;
+        private final List<String> tierPreference;
+        private final Long creationDate;
         private final List<ShardStats> shards = new ArrayList<>();
 
-        public IndexStatsBuilder(String indexName, String uuid, @Nullable ClusterHealthStatus health, @Nullable IndexMetadata.State state) {
+        public IndexStatsBuilder(
+            String indexName,
+            String uuid,
+            @Nullable ClusterHealthStatus health,
+            @Nullable IndexMetadata.State state,
+            @Nullable List<String> tierPreference,
+            @Nullable Long creationDate
+        ) {
             this.indexName = indexName;
             this.uuid = uuid;
             this.health = health;
             this.state = state;
+            this.tierPreference = tierPreference;
+            this.creationDate = creationDate;
         }
 
         public IndexStatsBuilder add(ShardStats shardStats) {
@@ -144,7 +174,15 @@ public IndexStatsBuilder add(ShardStats shardStats) {
         }
 
         public IndexStats build() {
-            return new IndexStats(indexName, uuid, health, state, shards.toArray(new ShardStats[shards.size()]));
+            return new IndexStats(
+                indexName,
+                uuid,
+                health,
+                state,
+                tierPreference,
+                creationDate,
+                shards.toArray(new ShardStats[shards.size()])
+            );
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java
new file mode 100644
index 0000000000000..2b67885273d05
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.features.FeatureSpecification;
+import org.elasticsearch.features.NodeFeature;
+
+import java.util.Set;
+
+public class IndicesStatsFeatures implements FeatureSpecification {
+
+    @Override
+    public Set<NodeFeature> getFeatures() {
+        return Set.of(IndexStats.TIER_CREATION_DATE);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
index 65f7e1969ea69..205f1cbc04e8b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
@@ -47,6 +47,10 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
 
     private final Map<String, IndexMetadata.State> indexStateMap;
 
+    private final Map<String, List<String>> indexTierPreferenceMap;
+
+    private final Map<String, Long> indexCreationDateMap;
+
     private final ShardStats[] shards;
 
     private Map<ShardRouting, CommonStats> shardStatsMap;
@@ -54,12 +58,23 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
 
     IndicesStatsResponse(StreamInput in) throws IOException {
         super(in);
         shards = in.readArray(ShardStats::new, ShardStats[]::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_STATS_ADDITIONAL_FIELDS)) {
             indexHealthMap = in.readMap(ClusterHealthStatus::readFrom);
             indexStateMap = in.readMap(IndexMetadata.State::readFrom);
+            indexTierPreferenceMap = in.readMap(StreamInput::readStringCollectionAsList);
+            indexCreationDateMap = in.readMap(StreamInput::readLong);
+        } else if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
+            // Between 8.1 and INDEX_STATS_ADDITIONAL_FIELDS, we had a different format for the response
+            // where we only had health and state available.
+            indexHealthMap = in.readMap(ClusterHealthStatus::readFrom);
+            indexStateMap = in.readMap(IndexMetadata.State::readFrom);
+            indexTierPreferenceMap = Map.of();
+            indexCreationDateMap = Map.of();
         } else {
             indexHealthMap = Map.of();
             indexStateMap = Map.of();
+            indexTierPreferenceMap = Map.of();
+            indexCreationDateMap = Map.of();
         }
     }
@@ -79,6 +94,8 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
         Objects.requireNonNull(shards);
         Map<String, ClusterHealthStatus> indexHealthModifiableMap = new HashMap<>();
         Map<String, IndexMetadata.State> indexStateModifiableMap = new HashMap<>();
+        Map<String, List<String>> indexTierPreferenceModifiableMap = new HashMap<>();
+        Map<String, Long> indexCreationDateModifiableMap = new HashMap<>();
         for (ShardStats shard : shards) {
             Index index = shard.getShardRouting().index();
             IndexMetadata indexMetadata = metadata.index(index);
@@ -88,10 +105,14 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
                     ignored -> new ClusterIndexHealth(indexMetadata, routingTable.index(index)).getStatus()
                 );
                 indexStateModifiableMap.computeIfAbsent(index.getName(), ignored -> indexMetadata.getState());
+                indexTierPreferenceModifiableMap.computeIfAbsent(index.getName(), ignored -> indexMetadata.getTierPreference());
+                indexCreationDateModifiableMap.computeIfAbsent(index.getName(), ignored -> indexMetadata.getCreationDate());
             }
         }
         indexHealthMap = unmodifiableMap(indexHealthModifiableMap);
         indexStateMap = unmodifiableMap(indexStateModifiableMap);
+        indexTierPreferenceMap = unmodifiableMap(indexTierPreferenceModifiableMap);
+        indexCreationDateMap = unmodifiableMap(indexCreationDateModifiableMap);
     }
 
     public Map<ShardRouting, CommonStats> asMap() {
@@ -129,7 +150,14 @@ public Map<String, IndexStats> getIndices() {
             Index index = shard.getShardRouting().index();
             IndexStatsBuilder indexStatsBuilder = indexToIndexStatsBuilder.computeIfAbsent(
                 index.getName(),
-                k -> new IndexStatsBuilder(k, index.getUUID(), indexHealthMap.get(index.getName()), indexStateMap.get(index.getName()))
+                k -> new IndexStatsBuilder(
+                    k,
+                    index.getUUID(),
+                    indexHealthMap.get(index.getName()),
+                    indexStateMap.get(index.getName()),
+                    indexTierPreferenceMap.get(index.getName()),
+                    indexCreationDateMap.get(index.getName())
+                )
             );
             indexStatsBuilder.add(shard);
         }
@@ -174,7 +202,13 @@ public CommonStats getPrimaries() {
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeArray(shards);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_STATS_ADDITIONAL_FIELDS)) {
+            out.writeMap(indexHealthMap, StreamOutput::writeWriteable);
+            out.writeMap(indexStateMap, StreamOutput::writeWriteable);
+            out.writeMap(indexTierPreferenceMap, StreamOutput::writeStringCollection);
+            out.writeMap(indexCreationDateMap, StreamOutput::writeLong);
+
+        } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
             out.writeMap(indexHealthMap, StreamOutput::writeWriteable);
             out.writeMap(indexStateMap, StreamOutput::writeWriteable);
         }
@@ -203,6 +237,12 @@ protected Iterator<ToXContent> customXContentChunks(ToXContent.Params params) {
                 if (indexStats.getState() != null) {
                     builder.field("status", indexStats.getState().toString().toLowerCase(Locale.ROOT));
                 }
+                if (indexStats.getTierPreference() != null) {
+                    builder.field("tier_preference", indexStats.getTierPreference());
+                }
+                if (indexStats.getCreationDate() != null) {
+                    builder.field("creation_date", indexStats.getCreationDate());
+                }
                 builder.startObject("primaries");
                 indexStats.getPrimaries().toXContent(builder, p);
                 builder.endObject();
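The three-branch read above is the standard transport-version compatibility ladder: each branch matches exactly what peers of that version wrote, and the write side at the end of the same hunk is gated on the same constants. Reduced to its essentials, the pattern looks like this sketch (the `Example` class is illustrative, not Elasticsearch code; it reuses the `INDEX_STATS_ADDITIONAL_FIELDS` version introduced in this diff):

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.Map;

public class Example implements Writeable {
    private final Map<String, Long> creationDates; // field added in the newer version

    public Example(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_STATS_ADDITIONAL_FIELDS)) {
            creationDates = in.readMap(StreamInput::readLong); // the peer wrote the field
        } else {
            creationDates = Map.of(); // older peer: fall back to an empty default
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_STATS_ADDITIONAL_FIELDS)) {
            out.writeMap(creationDates, StreamOutput::writeLong); // only if the peer can read it
        }
    }
}
```

Both ends branch on the same version constant; letting them skew is exactly the mixed-version failure the old/new format split in the constructor above guards against.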
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
index 4c3f32240ca8c..f30206c1d238a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
@@ -32,7 +32,7 @@
  */
 public final class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest> implements ToXContentObject {
 
-    public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.fromOptions(false, false, true, false);
+    public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed();
 
     private QueryBuilder query = new MatchAllQueryBuilder();
 
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java
index ce3e189149451..ad1fda2534fab 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java
@@ -42,6 +42,7 @@
 import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.core.Releasable;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.Index;
@@ -543,7 +544,8 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques
         var isFailureStoreRequest = isFailureStoreRequest(docWriteRequest);
         if (isFailureStoreRequest == false
             && failureStoreCandidate.isFailureStoreEnabled()
-            && error instanceof VersionConflictEngineException == false) {
+            && error instanceof VersionConflictEngineException == false
+            && error instanceof EsRejectedExecutionException == false) {
             // Prepare the data stream failure store if necessary
             maybeMarkFailureStoreForRollover(failureStoreCandidate);
 
@@ -563,8 +565,8 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques
             }
         } else {
             // If we can't redirect to a failure store (because either the data stream doesn't have the failure store enabled
-            // or this request was already targeting a failure store), or this was a version conflict we increment the
-            // rejected counter.
+            // or this request was already targeting a failure store), or this was an error that is not eligible for the failure
+            // store, such as a version conflict or a load rejection, we increment the rejected counter.
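// The eligibility rule in the comment above, isolated as a predicate (a sketch; the
// method name is hypothetical, while the two checks mirror the condition added earlier
// in this method):
//
//     private static boolean eligibleForFailureStore(Exception error) {
//         return error instanceof VersionConflictEngineException == false
//             && error instanceof EsRejectedExecutionException == false;
//     }
//
// Version conflicts are deliberate, client-visible outcomes, and EsRejectedExecutionException
// signals overload; redirecting either into the failure store would be misleading or would
// add load at the worst possible time, so both are only counted as rejected.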
             failureStoreMetrics.incrementRejected(
                 bulkItemRequest.index(),
                 errorType,
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index d0785a60dd0f5..c0811e7424b0d 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -51,6 +51,7 @@
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
+import java.util.function.Supplier;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
@@ -76,6 +77,9 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
     private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class);
     private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_10_X;
 
+    private static final Supplier<String> ID_GENERATOR = UUIDs::base64UUID;
+    private static final Supplier<String> K_SORTED_TIME_BASED_ID_GENERATOR = UUIDs::base64TimeBasedKOrderedUUID;
+
     /**
      * Max length of the source document to include into string()
      *
@@ -692,10 +696,18 @@ public void process(IndexRouting indexRouting) {
      * request compatible with the append-only optimization.
      */
     public void autoGenerateId() {
-        assert id == null;
-        assert autoGeneratedTimestamp == UNSET_AUTO_GENERATED_TIMESTAMP : "timestamp has already been generated!";
-        assert ifSeqNo == UNASSIGNED_SEQ_NO;
-        assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM;
+        assertBeforeGeneratingId();
+        autoGenerateTimestamp();
+        id(ID_GENERATOR.get());
+    }
+
+    public void autoGenerateTimeBasedId() {
+        assertBeforeGeneratingId();
+        autoGenerateTimestamp();
+        id(K_SORTED_TIME_BASED_ID_GENERATOR.get());
+    }
+
+    private void autoGenerateTimestamp() {
         /*
          * Set the auto generated timestamp so the append only optimization
          * can quickly test if this request *must* be unique without reaching
@@ -704,8 +716,13 @@ public void autoGenerateId() {
          * never work before 1970, but that's ok. It's after 1970.
          */
         autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis());
-        String uid = UUIDs.base64UUID();
-        id(uid);
+    }
+
+    private void assertBeforeGeneratingId() {
+        assert id == null;
+        assert autoGeneratedTimestamp == UNSET_AUTO_GENERATED_TIMESTAMP : "timestamp has already been generated!";
+        assert ifSeqNo == UNASSIGNED_SEQ_NO;
+        assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM;
     }
 
     /**
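For context on the two suppliers introduced above: both produce valid, effectively unique document IDs, but the k-sorted variant encodes a time-derived prefix, so IDs minted close together in time also sort close together. A brief usage sketch (method names as in the diff; the demo class is invented and the locality note is the motivation, not a guarantee):

```java
import org.elasticsearch.common.UUIDs;

public class IdGeneratorDemo {
    public static void main(String[] args) {
        // Purely random ID: uniform, uncorrelated with creation time.
        String randomId = UUIDs.base64UUID();

        // K-sorted time-based ID: leading bytes derive from the clock, so
        // lexicographic order roughly follows creation order, which suits
        // append-mostly, time-series style indices.
        String kOrderedId = UUIDs.base64TimeBasedKOrderedUUID();

        System.out.println(randomId + " / " + kOrderedId);
    }
}
```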
*/ -final class CanMatchPreFilterSearchPhase extends SearchPhase { +final class CanMatchPreFilterSearchPhase { private final Logger logger; private final SearchRequest request; @@ -92,7 +92,6 @@ final class CanMatchPreFilterSearchPhase extends SearchPhase { CoordinatorRewriteContextProvider coordinatorRewriteContextProvider, ActionListener> listener ) { - super("can_match"); this.logger = logger; this.searchTransportService = searchTransportService; this.nodeIdToConnection = nodeIdToConnection; @@ -128,12 +127,6 @@ private static boolean assertSearchCoordinationThread() { return ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); } - @Override - public void run() { - assert assertSearchCoordinationThread(); - runCoordinatorRewritePhase(); - } - // tries to pre-filter shards based on information that's available to the coordinator // without having to reach out to the actual shards private void runCoordinatorRewritePhase() { @@ -189,7 +182,7 @@ private void consumeResult(boolean canMatch, ShardSearchRequest request) { private void checkNoMissingShards(GroupShardsIterator shards) { assert assertSearchCoordinationThread(); - doCheckNoMissingShards(getName(), request, shards); + SearchPhase.doCheckNoMissingShards("can_match", request, shards, SearchPhase::makeMissingShardsError); } private Map> groupByNode(GroupShardsIterator shards) { @@ -318,7 +311,7 @@ public boolean isForceExecution() { @Override public void onFailure(Exception e) { if (logger.isDebugEnabled()) { - logger.debug(() -> format("Failed to execute [%s] while running [%s] phase", request, getName()), e); + logger.debug(() -> format("Failed to execute [%s] while running [can_match] phase", request), e); } onPhaseFailure("round", e); } @@ -370,7 +363,6 @@ public CanMatchNodeRequest.Shard buildShardLevelRequest(SearchShardIterator shar ); } - @Override public void start() { if (getNumShards() == 0) { finishPhase(); @@ -381,20 +373,21 @@ public void start() { @Override public void onFailure(Exception e) { if (logger.isDebugEnabled()) { - logger.debug(() -> format("Failed to execute [%s] while running [%s] phase", request, getName()), e); + logger.debug(() -> format("Failed to execute [%s] while running [can_match] phase", request), e); } onPhaseFailure("start", e); } @Override protected void doRun() { - CanMatchPreFilterSearchPhase.this.run(); + assert assertSearchCoordinationThread(); + runCoordinatorRewritePhase(); } }); } public void onPhaseFailure(String msg, Exception cause) { - listener.onFailure(new SearchPhaseExecutionException(getName(), msg, cause, ShardSearchFailure.EMPTY_ARRAY)); + listener.onFailure(new SearchPhaseExecutionException("can_match", msg, cause, ShardSearchFailure.EMPTY_ARRAY)); } public Transport.Connection getConnection(SendingTarget sendingTarget) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java index e4fef357cb4e9..d91ea85e2fa97 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java @@ -15,8 +15,8 @@ import org.elasticsearch.transport.Transport; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.Objects; +import java.util.function.Function; /** * Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards. 
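The hunk that follows turns the missing-shards check into a static helper that accepts the error-message factory as a function; that is what lets CanMatchPreFilterSearchPhase keep its customized message now that it no longer extends SearchPhase. A simplified, self-contained sketch of that shape, with the Elasticsearch types reduced to stand-ins:

```java
import java.util.List;
import java.util.function.Function;

// Stand-ins for GroupShardsIterator/SearchShardIterator and SearchPhaseExecutionException.
final class MissingShardsCheck {
    record ShardStub(String id, boolean hasActiveCopy) {}

    static void doCheckNoMissingShards(
        String phaseName,
        boolean allowPartialSearchResults,
        List<ShardStub> shards,
        Function<StringBuilder, String> makeErrorMessage
    ) {
        if (allowPartialSearchResults) {
            return; // partial results are acceptable, nothing to enforce
        }
        StringBuilder missingShards = new StringBuilder();
        for (ShardStub shard : shards) {
            if (shard.hasActiveCopy() == false) {
                missingShards.append(missingShards.isEmpty() ? "" : ", ").append(shard.id());
            }
        }
        if (missingShards.isEmpty() == false) {
            throw new IllegalStateException("[" + phaseName + "] " + makeErrorMessage.apply(missingShards));
        }
    }

    public static void main(String[] args) {
        // Callers that subclass can pass their own factory; everyone else uses the default message.
        doCheckNoMissingShards(
            "can_match",
            false,
            List.of(new ShardStub("[logs][0]", true)),
            missing -> "Search rejected due to missing shards [" + missing + "]."
        );
    }
}
```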
@@ -35,21 +35,26 @@ public String getName() { return name; } - public void start() { - try { - run(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + protected String missingShardsErrorMessage(StringBuilder missingShards) { + return makeMissingShardsError(missingShards); } - protected String missingShardsErrorMessage(StringBuilder missingShards) { + protected static String makeMissingShardsError(StringBuilder missingShards) { return "Search rejected due to missing shards [" + missingShards + "]. Consider using `allow_partial_search_results` setting to bypass this error."; } protected void doCheckNoMissingShards(String phaseName, SearchRequest request, GroupShardsIterator shardsIts) { + doCheckNoMissingShards(phaseName, request, shardsIts, this::missingShardsErrorMessage); + } + + protected static void doCheckNoMissingShards( + String phaseName, + SearchRequest request, + GroupShardsIterator shardsIts, + Function makeErrorMessage + ) { assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; if (request.allowPartialSearchResults() == false) { final StringBuilder missingShards = new StringBuilder(); @@ -65,7 +70,7 @@ protected void doCheckNoMissingShards(String phaseName, SearchRequest request, G } if (missingShards.isEmpty() == false) { // Status red - shard is missing all copies and would produce partial results for an index search - final String msg = missingShardsErrorMessage(missingShards); + final String msg = makeErrorMessage.apply(missingShards); throw new SearchPhaseExecutionException(phaseName, msg, null, ShardSearchFailure.EMPTY_ARRAY); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index afbfe129c302e..2927c394da3d4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -130,6 +130,14 @@ public SearchRequestBuilder setWaitForCheckpoints(Map waitForChe return this; } + /** + * Set the timeout for the {@link #setWaitForCheckpoints(Map)} request. + */ + public SearchRequestBuilder setWaitForCheckpointsTimeout(final TimeValue waitForCheckpointsTimeout) { + request.setWaitForCheckpointsTimeout(waitForCheckpointsTimeout); + return this; + } + /** * Specifies what type of requested indices to ignore and wildcard indices expressions. *

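A hypothetical caller of the setWaitForCheckpointsTimeout builder method added above, pairing a checkpoint map with an explicit timeout. The index name and checkpoint values are illustrative, and the import paths assume the recent 8.x package layout:

```java
import java.util.Map;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.core.TimeValue;

final class WaitForCheckpointsExample {
    // Waits for the listed shard checkpoints on "my-index" for at most
    // 30 seconds before the search executes.
    static SearchRequestBuilder newSearch(Client client) {
        return client.prepareSearch("my-index")
            .setWaitForCheckpoints(Map.of("my-index", new long[] { 42L }))
            .setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30));
    }
}
```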
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java deleted file mode 100644 index 6141e1704969b..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.telemetry.metric.LongHistogram; -import org.elasticsearch.telemetry.metric.MeterRegistry; - -public class SearchTransportAPMMetrics { - public static final String SEARCH_ACTION_LATENCY_BASE_METRIC = "es.search.nodes.transport_actions.latency.histogram"; - public static final String ACTION_ATTRIBUTE_NAME = "action"; - - public static final String QUERY_CAN_MATCH_NODE_METRIC = "shards_can_match"; - public static final String DFS_ACTION_METRIC = "dfs_query_then_fetch/shard_dfs_phase"; - public static final String QUERY_ID_ACTION_METRIC = "dfs_query_then_fetch/shard_query_phase"; - public static final String QUERY_ACTION_METRIC = "query_then_fetch/shard_query_phase"; - public static final String RANK_SHARD_FEATURE_ACTION_METRIC = "rank/shard_feature_phase"; - public static final String FREE_CONTEXT_ACTION_METRIC = "shard_release_context"; - public static final String FETCH_ID_ACTION_METRIC = "shard_fetch_phase"; - public static final String QUERY_SCROLL_ACTION_METRIC = "scroll/shard_query_phase"; - public static final String FETCH_ID_SCROLL_ACTION_METRIC = "scroll/shard_fetch_phase"; - public static final String QUERY_FETCH_SCROLL_ACTION_METRIC = "scroll/shard_query_and_fetch_phase"; - public static final String FREE_CONTEXT_SCROLL_ACTION_METRIC = "scroll/shard_release_context"; - public static final String CLEAR_SCROLL_CONTEXTS_ACTION_METRIC = "scroll/shard_release_contexts"; - - private final LongHistogram actionLatencies; - - public SearchTransportAPMMetrics(MeterRegistry meterRegistry) { - this( - meterRegistry.registerLongHistogram( - SEARCH_ACTION_LATENCY_BASE_METRIC, - "Transport action execution times at the node level, expressed as a histogram", - "millis" - ) - ); - } - - private SearchTransportAPMMetrics(LongHistogram actionLatencies) { - this.actionLatencies = actionLatencies; - } - - public LongHistogram getActionLatencies() { - return actionLatencies; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 604cf950f083b..8444a92b24432 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -67,20 +67,6 @@ import java.util.concurrent.Executor; import java.util.function.BiFunction; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.CLEAR_SCROLL_CONTEXTS_ACTION_METRIC; -import static 
org.elasticsearch.action.search.SearchTransportAPMMetrics.DFS_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_CAN_MATCH_NODE_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_FETCH_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ID_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.RANK_SHARD_FEATURE_ACTION_METRIC; - /** * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through * transport. @@ -450,11 +436,7 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static void registerRequestHandler( - TransportService transportService, - SearchService searchService, - SearchTransportAPMMetrics searchTransportMetrics - ) { + public static void registerRequestHandler(TransportService transportService, SearchService searchService) { final TransportRequestHandler freeContextHandler = (request, channel, task) -> { logger.trace("releasing search context [{}]", request.id()); boolean freed = searchService.freeReaderContext(request.id()); @@ -465,7 +447,7 @@ public static void registerRequestHandler( FREE_CONTEXT_SCROLL_ACTION_NAME, freeContextExecutor, ScrollFreeContextRequest::new, - instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) + freeContextHandler ); TransportActionProxy.registerProxyAction( transportService, @@ -478,7 +460,7 @@ public static void registerRequestHandler( FREE_CONTEXT_ACTION_NAME, freeContextExecutor, SearchFreeContextRequest::new, - instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) + freeContextHandler ); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, false, SearchFreeContextResponse::readFrom); @@ -486,10 +468,10 @@ public static void registerRequestHandler( CLEAR_SCROLL_CONTEXTS_ACTION_NAME, freeContextExecutor, ClearScrollContextsRequest::new, - instrumentedHandler(CLEAR_SCROLL_CONTEXTS_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { + (request, channel, task) -> { searchService.freeAllScrollContexts(); channel.sendResponse(TransportResponse.Empty.INSTANCE); - }) + } ); TransportActionProxy.registerProxyAction( transportService, @@ -502,16 +484,7 @@ public static void registerRequestHandler( DFS_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardSearchRequest::new, - instrumentedHandler( - DFS_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeDfsPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) - ) + (request, channel, task) -> searchService.executeDfsPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)) ); 
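These handlers are now registered directly; the instrumentedHandler wrapper they previously passed through (deleted wholesale later in this diff) was a latency-recording decorator. A generic sketch of that pattern, with stand-ins for the transport and telemetry types:

```java
import java.util.function.LongSupplier;

// Stand-ins for TransportRequestHandler and the APM latency histogram.
final class InstrumentedHandlers {
    interface Handler<R> {
        void messageReceived(R request) throws Exception;
    }

    interface LatencyRecorder {
        void record(long elapsedMillis, String action);
    }

    // Wraps a handler so that every invocation records its elapsed time under
    // the given action name, mirroring the deleted instrumentedHandler.
    static <R> Handler<R> instrumented(String action, LongSupplier relativeTimeMillis, LatencyRecorder recorder, Handler<R> delegate) {
        return request -> {
            long startTime = relativeTimeMillis.getAsLong();
            try {
                delegate.messageReceived(request);
            } finally {
                recorder.record(relativeTimeMillis.getAsLong() - startTime, action);
            }
        };
    }
}
```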
TransportActionProxy.registerProxyAction(transportService, DFS_ACTION_NAME, true, DfsSearchResult::new); @@ -519,15 +492,10 @@ public static void registerRequestHandler( QUERY_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardSearchRequest::new, - instrumentedHandler( - QUERY_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeQueryPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) ) ); TransportActionProxy.registerProxyActionWithDynamicResponseType( @@ -541,15 +509,10 @@ public static void registerRequestHandler( QUERY_ID_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, QuerySearchRequest::new, - instrumentedHandler( - QUERY_ID_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeQueryPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, true, QuerySearchResult::new); @@ -558,15 +521,10 @@ public static void registerRequestHandler( QUERY_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, InternalScrollSearchRequest::new, - instrumentedHandler( - QUERY_SCROLL_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeQueryPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, true, ScrollQuerySearchResult::new); @@ -575,15 +533,10 @@ public static void registerRequestHandler( QUERY_FETCH_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, InternalScrollSearchRequest::new, - instrumentedHandler( - QUERY_FETCH_SCROLL_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeFetchPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) + (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, true, ScrollQueryFetchSearchResult::new); @@ -594,7 +547,7 @@ public static void registerRequestHandler( RANK_FEATURE_SHARD_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, RankFeatureShardRequest::new, - instrumentedHandler(RANK_SHARD_FEATURE_ACTION_METRIC, transportService, searchTransportMetrics, rankShardFeatureRequest) + rankShardFeatureRequest ); TransportActionProxy.registerProxyAction(transportService, RANK_FEATURE_SHARD_ACTION_NAME, true, RankFeatureResult::new); @@ -604,7 +557,7 @@ public static void registerRequestHandler( FETCH_ID_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardFetchRequest::new, - instrumentedHandler(FETCH_ID_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, shardFetchRequestHandler) + shardFetchRequestHandler ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, true, 
FetchSearchResult::new); @@ -614,7 +567,7 @@ public static void registerRequestHandler( true, true, ShardFetchSearchRequest::new, - instrumentedHandler(FETCH_ID_ACTION_METRIC, transportService, searchTransportMetrics, shardFetchRequestHandler) + shardFetchRequestHandler ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new); @@ -622,12 +575,7 @@ public static void registerRequestHandler( QUERY_CAN_MATCH_NODE_NAME, transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION), CanMatchNodeRequest::new, - instrumentedHandler( - QUERY_CAN_MATCH_NODE_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) - ) + (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) ); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, true, CanMatchNodeResponse::new); } @@ -658,26 +606,6 @@ public void onFailure(Exception e) { }); } - private static TransportRequestHandler instrumentedHandler( - String actionQualifier, - TransportService transportService, - SearchTransportAPMMetrics searchTransportMetrics, - TransportRequestHandler transportRequestHandler - ) { - var threadPool = transportService.getThreadPool(); - var latencies = searchTransportMetrics.getActionLatencies(); - Map attributes = Map.of(ACTION_ATTRIBUTE_NAME, actionQualifier); - return (request, channel, task) -> { - var startTime = threadPool.relativeTimeInMillis(); - try { - transportRequestHandler.messageReceived(request, channel, task); - } finally { - var elapsedTime = threadPool.relativeTimeInMillis() - startTime; - latencies.record(elapsedTime, attributes); - } - }; - } - /** * Returns a connection to the given node on the provided cluster. If the cluster alias is null the node will be resolved * against the local cluster. diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index eee65134eae33..7ba4a7ce59869 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -148,7 +148,7 @@ private final class OpenPointInTimePhase implements TransportSearchAction.Search } @Override - public SearchPhase newSearchPhase( + public void runNewSearchPhase( SearchTask task, SearchRequest searchRequest, Executor executor, @@ -166,7 +166,7 @@ public SearchPhase newSearchPhase( // that is signaled to the local can match through the SearchShardIterator#prefiltered flag. Local shards do need to go // through the local can match phase. 
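In outline, runNewSearchPhase below branches as follows: when the source can be rewritten to match_none, the can_match pre-filter runs first and only the surviving shard iterators feed the point-in-time phase; otherwise the phase starts directly. A reduced sketch with stand-in types (the real code wires this through listeners rather than return values):

```java
import java.util.List;
import java.util.function.Consumer;
import java.util.function.UnaryOperator;

// Stand-in for SearchShardIterator; the two functional parameters model the
// can_match pre-filter and the point-in-time phase.
final class PointInTimeDispatch {
    record ShardIterator(String shard) {}

    static void runNewSearchPhase(
        boolean canRewriteToMatchNone,
        List<ShardIterator> shards,
        UnaryOperator<List<ShardIterator>> canMatchPreFilter,
        Consumer<List<ShardIterator>> openPointInTimePhase
    ) {
        if (canRewriteToMatchNone) {
            // Pre-filter first; only surviving shards reach the PIT phase.
            openPointInTimePhase.accept(canMatchPreFilter.apply(shards));
        } else {
            openPointInTimePhase.accept(shards);
        }
    }
}
```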
if (SearchService.canRewriteToMatchNone(searchRequest.source())) { - return new CanMatchPreFilterSearchPhase( + new CanMatchPreFilterSearchPhase( logger, searchTransportService, connectionLookup, @@ -180,7 +180,7 @@ public SearchPhase newSearchPhase( false, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), listener.delegateFailureAndWrap( - (searchResponseActionListener, searchShardIterators) -> openPointInTimePhase( + (searchResponseActionListener, searchShardIterators) -> runOpenPointInTimePhase( task, searchRequest, executor, @@ -191,11 +191,11 @@ public SearchPhase newSearchPhase( aliasFilter, concreteIndexBoosts, clusters - ).start() + ) ) - ); + ).start(); } else { - return openPointInTimePhase( + runOpenPointInTimePhase( task, searchRequest, executor, @@ -210,7 +210,7 @@ public SearchPhase newSearchPhase( } } - SearchPhase openPointInTimePhase( + void runOpenPointInTimePhase( SearchTask task, SearchRequest searchRequest, Executor executor, @@ -224,7 +224,7 @@ SearchPhase openPointInTimePhase( ) { assert searchRequest.getMaxConcurrentShardRequests() == pitRequest.maxConcurrentShardRequests() : searchRequest.getMaxConcurrentShardRequests() + " != " + pitRequest.maxConcurrentShardRequests(); - return new AbstractSearchAsyncAction<>( + new AbstractSearchAsyncAction<>( actionName, logger, namedWriteableRegistry, @@ -243,7 +243,6 @@ SearchPhase openPointInTimePhase( searchRequest.getMaxConcurrentShardRequests(), clusters ) { - @Override protected String missingShardsErrorMessage(StringBuilder missingShards) { return "[open_point_in_time] action requires all shards to be available. Missing shards: [" + missingShards @@ -290,7 +289,7 @@ public void run() { boolean buildPointInTimeFromSearchResults() { return true; } - }; + }.start(); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 35f106ab58cbc..4bca7a562fc38 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -175,7 +175,6 @@ public TransportSearchAction( IndexNameExpressionResolver indexNameExpressionResolver, NamedWriteableRegistry namedWriteableRegistry, ExecutorSelector executorSelector, - SearchTransportAPMMetrics searchTransportMetrics, SearchResponseMetrics searchResponseMetrics, Client client, UsageService usageService @@ -186,7 +185,7 @@ public TransportSearchAction( this.searchPhaseController = searchPhaseController; this.searchTransportService = searchTransportService; this.remoteClusterService = searchTransportService.getRemoteClusterService(); - SearchTransportService.registerRequestHandler(transportService, searchService, searchTransportMetrics); + SearchTransportService.registerRequestHandler(transportService, searchService); this.clusterService = clusterService; this.transportService = transportService; this.searchService = searchService; @@ -1298,7 +1297,7 @@ private void executeSearch( localShardIterators.size() + remoteShardIterators.size(), defaultPreFilterShardSize ); - searchPhaseProvider.newSearchPhase( + searchPhaseProvider.runNewSearchPhase( task, searchRequest, asyncSearchExecutor, @@ -1311,7 +1310,7 @@ private void executeSearch( preFilterSearchShards, threadPool, clusters - ).start(); + ); } Executor asyncSearchExecutor(final String[] indices) { @@ -1415,7 +1414,7 @@ static GroupShardsIterator 
mergeShardsIterators( } interface SearchPhaseProvider { - SearchPhase newSearchPhase( + void runNewSearchPhase( SearchTask task, SearchRequest searchRequest, Executor executor, @@ -1439,7 +1438,7 @@ private class AsyncSearchActionProvider implements SearchPhaseProvider { } @Override - public SearchPhase newSearchPhase( + public void runNewSearchPhase( SearchTask task, SearchRequest searchRequest, Executor executor, @@ -1456,7 +1455,7 @@ public SearchPhase newSearchPhase( if (preFilter) { // only for aggs we need to contact shards even if there are no matches boolean requireAtLeastOneMatch = searchRequest.source() != null && searchRequest.source().aggregations() != null; - return new CanMatchPreFilterSearchPhase( + new CanMatchPreFilterSearchPhase( logger, searchTransportService, connectionLookup, @@ -1469,8 +1468,8 @@ public SearchPhase newSearchPhase( task, requireAtLeastOneMatch, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), - listener.delegateFailureAndWrap( - (l, iters) -> newSearchPhase( + listener.delegateFailureAndWrap((l, iters) -> { + runNewSearchPhase( task, searchRequest, executor, @@ -1483,9 +1482,10 @@ public SearchPhase newSearchPhase( false, threadPool, clusters - ).start() - ) - ); + ); + }) + ).start(); + return; } // for synchronous CCS minimize_roundtrips=false, use the CCSSingleCoordinatorSearchProgressListener // (AsyncSearchTask will not return SearchProgressListener.NOOP, since it uses its own progress listener @@ -1506,7 +1506,7 @@ public SearchPhase newSearchPhase( ); boolean success = false; try { - final SearchPhase searchPhase; + final AbstractSearchAsyncAction searchPhase; if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) { searchPhase = new SearchDfsQueryThenFetchAsyncAction( logger, @@ -1548,7 +1548,7 @@ public SearchPhase newSearchPhase( ); } success = true; - return searchPhase; + searchPhase.start(); } finally { if (success == false) { queryResultConsumer.close(); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java index f418b5617b2a1..d8b57972d604f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java @@ -146,7 +146,7 @@ public void searchShards(Task task, SearchShardsRequest searchShardsRequest, Act if (SearchService.canRewriteToMatchNone(searchRequest.source()) == false) { delegate.onResponse(new SearchShardsResponse(toGroups(shardIts), clusterState.nodes().getAllNodes(), aliasFilters)); } else { - var canMatchPhase = new CanMatchPreFilterSearchPhase(logger, searchTransportService, (clusterAlias, node) -> { + new CanMatchPreFilterSearchPhase(logger, searchTransportService, (clusterAlias, node) -> { assert Objects.equals(clusterAlias, searchShardsRequest.clusterAlias()); return transportService.getConnection(clusterState.nodes().get(node)); }, @@ -160,8 +160,7 @@ public void searchShards(Task task, SearchShardsRequest searchShardsRequest, Act false, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), delegate.map(its -> new SearchShardsResponse(toGroups(its), clusterState.nodes().getAllNodes(), aliasFilters)) - ); - canMatchPhase.start(); + ).start(); } }) ); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java 
index 236baf89a04e9..2a83f749e7d33 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -200,9 +200,11 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { ); if (Boolean.parseBoolean(System.getProperty("es.entitlements.enabled"))) { + logger.info("Bootstrapping Entitlements"); EntitlementBootstrap.bootstrap(); } else { // install SM after natives, shutdown hooks, etc. + logger.info("Bootstrapping java SecurityManager"); org.elasticsearch.bootstrap.Security.configure( nodeEnv, SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(args.nodeSettings()), diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 0383bbb9bd401..046f4b6b0b251 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; @@ -138,6 +139,7 @@ public ClusterModule( this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); this.allocationDeciders = new AllocationDeciders(deciderList); + var nodeAllocationStatsProvider = new NodeAllocationStatsProvider(writeLoadForecaster); this.shardsAllocator = createShardsAllocator( settings, clusterService.getClusterSettings(), @@ -146,7 +148,8 @@ public ClusterModule( clusterService, this::reconcile, writeLoadForecaster, - telemetryProvider + telemetryProvider, + nodeAllocationStatsProvider ); this.clusterService = clusterService; this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices); @@ -160,7 +163,12 @@ public ClusterModule( ); this.allocationService.addAllocFailuresResetListenerTo(clusterService); this.metadataDeleteIndexService = new MetadataDeleteIndexService(settings, clusterService, allocationService); - this.allocationStatsService = new AllocationStatsService(clusterService, clusterInfoService, shardsAllocator, writeLoadForecaster); + this.allocationStatsService = new AllocationStatsService( + clusterService, + clusterInfoService, + shardsAllocator, + nodeAllocationStatsProvider + ); this.telemetryProvider = telemetryProvider; } @@ -400,7 +408,8 @@ private static ShardsAllocator createShardsAllocator( ClusterService clusterService, DesiredBalanceReconcilerAction reconciler, WriteLoadForecaster writeLoadForecaster, - TelemetryProvider telemetryProvider + TelemetryProvider telemetryProvider, + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { Map> allocators = new HashMap<>(); allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster)); @@ -412,7 +421,8 @@ private static ShardsAllocator createShardsAllocator( threadPool, clusterService, reconciler, - telemetryProvider + telemetryProvider, + 
nodeAllocationStatsProvider ) ); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index 6841c6e49e3f1..62d3f5e5866bc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -467,7 +467,7 @@ public void handleCommit(ApplyCommitRequest applyCommit) { logger.debug( "handleCommit: ignored commit request due to term mismatch " + "(expected: [term {} version {}], actual: [term {} version {}])", - getLastAcceptedTerm(), + getCurrentTerm(), getLastAcceptedVersion(), applyCommit.getTerm(), applyCommit.getVersion() diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index 3fb3c182f89cd..be0e3429a2ce4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.transport.Transports; @@ -147,11 +149,15 @@ public void checkIndexSplitAllowed() {} private abstract static class IdAndRoutingOnly extends IndexRouting { private final boolean routingRequired; + private final IndexVersion creationVersion; + private final IndexMode indexMode; IdAndRoutingOnly(IndexMetadata metadata) { super(metadata); + this.creationVersion = metadata.getCreationVersion(); MappingMetadata mapping = metadata.mapping(); this.routingRequired = mapping == null ? 
false : mapping.routingRequired(); + this.indexMode = metadata.getIndexMode(); } protected abstract int shardId(String id, @Nullable String routing); @@ -161,12 +167,25 @@ public void process(IndexRequest indexRequest) { // generate id if not already provided final String id = indexRequest.id(); if (id == null) { - indexRequest.autoGenerateId(); + if (shouldUseTimeBasedId(indexMode, creationVersion)) { + indexRequest.autoGenerateTimeBasedId(); + } else { + indexRequest.autoGenerateId(); + } } else if (id.isEmpty()) { throw new IllegalArgumentException("if _id is specified it must not be empty"); } } + private static boolean shouldUseTimeBasedId(final IndexMode indexMode, final IndexVersion creationVersion) { + return indexMode == IndexMode.LOGSDB && isNewIndexVersion(creationVersion); + } + + private static boolean isNewIndexVersion(final IndexVersion creationVersion) { + return creationVersion.between(IndexVersions.TIME_BASED_K_ORDERED_DOC_ID_BACKPORT, IndexVersions.UPGRADE_TO_LUCENE_10_0_0) + || creationVersion.onOrAfter(IndexVersions.TIME_BASED_K_ORDERED_DOC_ID); + } + @Override public int indexShard( String id, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java index 3651f560e6dde..0c82faaaeaa45 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java @@ -10,86 +10,35 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.cluster.ClusterInfoService; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.util.Maps; import java.util.Map; +import java.util.function.Supplier; public class AllocationStatsService { - private final ClusterService clusterService; private final ClusterInfoService clusterInfoService; - private final DesiredBalanceShardsAllocator desiredBalanceShardsAllocator; - private final WriteLoadForecaster writeLoadForecaster; + private final Supplier desiredBalanceSupplier; + private final NodeAllocationStatsProvider nodeAllocationStatsProvider; public AllocationStatsService( ClusterService clusterService, ClusterInfoService clusterInfoService, ShardsAllocator shardsAllocator, - WriteLoadForecaster writeLoadForecaster + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { this.clusterService = clusterService; this.clusterInfoService = clusterInfoService; - this.desiredBalanceShardsAllocator = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator ? allocator : null; - this.writeLoadForecaster = writeLoadForecaster; + this.nodeAllocationStatsProvider = nodeAllocationStatsProvider; + this.desiredBalanceSupplier = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator + ? 
allocator::getDesiredBalance + : () -> null; } public Map stats() { - var state = clusterService.state(); - var info = clusterInfoService.getClusterInfo(); - var desiredBalance = desiredBalanceShardsAllocator != null ? desiredBalanceShardsAllocator.getDesiredBalance() : null; - - var stats = Maps.newMapWithExpectedSize(state.getRoutingNodes().size()); - for (RoutingNode node : state.getRoutingNodes()) { - int shards = 0; - int undesiredShards = 0; - double forecastedWriteLoad = 0.0; - long forecastedDiskUsage = 0; - long currentDiskUsage = 0; - for (ShardRouting shardRouting : node) { - if (shardRouting.relocating()) { - continue; - } - shards++; - IndexMetadata indexMetadata = state.metadata().getIndexSafe(shardRouting.index()); - if (isDesiredAllocation(desiredBalance, shardRouting) == false) { - undesiredShards++; - } - long shardSize = info.getShardSize(shardRouting.shardId(), shardRouting.primary(), 0); - forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); - forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); - currentDiskUsage += shardSize; - - } - stats.put( - node.nodeId(), - new NodeAllocationStats( - shards, - desiredBalanceShardsAllocator != null ? undesiredShards : -1, - forecastedWriteLoad, - forecastedDiskUsage, - currentDiskUsage - ) - ); - } - - return stats; - } - - private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { - if (desiredBalance == null) { - return true; - } - var assignment = desiredBalance.getAssignment(shardRouting.shardId()); - if (assignment == null) { - return false; - } - return assignment.nodeIds().contains(shardRouting.currentNodeId()); + return nodeAllocationStatsProvider.stats(clusterService.state(), clusterInfoService.getClusterInfo(), desiredBalanceSupplier.get()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java new file mode 100644 index 0000000000000..157b409be14d3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.Nullable; + +import java.util.Map; + +public class NodeAllocationStatsProvider { + private final WriteLoadForecaster writeLoadForecaster; + + public NodeAllocationStatsProvider(WriteLoadForecaster writeLoadForecaster) { + this.writeLoadForecaster = writeLoadForecaster; + } + + public Map stats( + ClusterState clusterState, + ClusterInfo clusterInfo, + @Nullable DesiredBalance desiredBalance + ) { + var stats = Maps.newMapWithExpectedSize(clusterState.getRoutingNodes().size()); + for (RoutingNode node : clusterState.getRoutingNodes()) { + int shards = 0; + int undesiredShards = 0; + double forecastedWriteLoad = 0.0; + long forecastedDiskUsage = 0; + long currentDiskUsage = 0; + for (ShardRouting shardRouting : node) { + if (shardRouting.relocating()) { + continue; + } + shards++; + IndexMetadata indexMetadata = clusterState.metadata().getIndexSafe(shardRouting.index()); + if (isDesiredAllocation(desiredBalance, shardRouting) == false) { + undesiredShards++; + } + long shardSize = clusterInfo.getShardSize(shardRouting.shardId(), shardRouting.primary(), 0); + forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); + forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); + currentDiskUsage += shardSize; + + } + stats.put( + node.nodeId(), + new NodeAllocationStats( + shards, + desiredBalance != null ? 
undesiredShards : -1, + forecastedWriteLoad, + forecastedDiskUsage, + currentDiskUsage + ) + ); + } + + return stats; + } + + private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { + if (desiredBalance == null) { + return true; + } + var assignment = desiredBalance.getAssignment(shardRouting.shardId()); + if (assignment == null) { + return false; + } + return assignment.nodeIds().contains(shardRouting.currentNodeId()); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index 682dc85ccd00f..3b22221ea7db4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.time.TimeProvider; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; @@ -37,7 +38,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; -import java.util.function.LongSupplier; import java.util.function.Predicate; import static java.util.stream.Collectors.toUnmodifiableSet; @@ -50,7 +50,7 @@ public class DesiredBalanceComputer { private static final Logger logger = LogManager.getLogger(DesiredBalanceComputer.class); private final ShardsAllocator delegateAllocator; - private final LongSupplier timeSupplierMillis; + private final TimeProvider timeProvider; // stats protected final MeanMetric iterations = new MeanMetric(); @@ -73,9 +73,9 @@ public class DesiredBalanceComputer { private TimeValue progressLogInterval; private long maxBalanceComputationTimeDuringIndexCreationMillis; - public DesiredBalanceComputer(ClusterSettings clusterSettings, LongSupplier timeSupplierMillis, ShardsAllocator delegateAllocator) { + public DesiredBalanceComputer(ClusterSettings clusterSettings, TimeProvider timeProvider, ShardsAllocator delegateAllocator) { this.delegateAllocator = delegateAllocator; - this.timeSupplierMillis = timeSupplierMillis; + this.timeProvider = timeProvider; clusterSettings.initializeAndWatch(PROGRESS_LOG_INTERVAL_SETTING, value -> this.progressLogInterval = value); clusterSettings.initializeAndWatch( MAX_BALANCE_COMPUTATION_TIME_DURING_INDEX_CREATION_SETTING, @@ -275,7 +275,7 @@ public DesiredBalance compute( final int iterationCountReportInterval = computeIterationCountReportInterval(routingAllocation); final long timeWarningInterval = progressLogInterval.millis(); - final long computationStartedTime = timeSupplierMillis.getAsLong(); + final long computationStartedTime = timeProvider.relativeTimeInMillis(); long nextReportTime = computationStartedTime + timeWarningInterval; int i = 0; @@ -323,7 +323,7 @@ public DesiredBalance compute( i++; final int iterations = i; - final long currentTime = timeSupplierMillis.getAsLong(); + final long currentTime = timeProvider.relativeTimeInMillis(); final boolean reportByTime = nextReportTime <= currentTime; final boolean reportByIterationCount = i % iterationCountReportInterval == 0; if (reportByTime || reportByIterationCount) { diff --git 
a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java index d8a2d01f56dff..cf8840dc95724 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java @@ -10,6 +10,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.telemetry.metric.MeterRegistry; @@ -26,10 +27,12 @@ public record AllocationStats(long unassignedShards, long totalAllocations, long public record NodeWeightStats(long shardCount, double diskUsageInBytes, double writeLoad, double nodeWeight) {} public static final DesiredBalanceMetrics NOOP = new DesiredBalanceMetrics(MeterRegistry.NOOP); + public static final String UNASSIGNED_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.unassigned.current"; public static final String TOTAL_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.current"; public static final String UNDESIRED_ALLOCATION_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.current"; public static final String UNDESIRED_ALLOCATION_RATIO_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.ratio"; + public static final String DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME = "es.allocator.desired_balance.allocations.node_weight.current"; public static final String DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.node_shard_count.current"; @@ -37,6 +40,15 @@ public record NodeWeightStats(long shardCount, double diskUsageInBytes, double w "es.allocator.desired_balance.allocations.node_write_load.current"; public static final String DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME = "es.allocator.desired_balance.allocations.node_disk_usage_bytes.current"; + + public static final String CURRENT_NODE_SHARD_COUNT_METRIC_NAME = "es.allocator.allocations.node.shard_count.current"; + public static final String CURRENT_NODE_WRITE_LOAD_METRIC_NAME = "es.allocator.allocations.node.write_load.current"; + public static final String CURRENT_NODE_DISK_USAGE_METRIC_NAME = "es.allocator.allocations.node.disk_usage_bytes.current"; + public static final String CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME = + "es.allocator.allocations.node.undesired_shard_count.current"; + public static final String CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME = + "es.allocator.allocations.node.forecasted_disk_usage_bytes.current"; + public static final AllocationStats EMPTY_ALLOCATION_STATS = new AllocationStats(-1, -1, -1); private volatile boolean nodeIsMaster = false; @@ -56,8 +68,13 @@ public record NodeWeightStats(long shardCount, double diskUsageInBytes, double w private volatile long undesiredAllocations; private final AtomicReference> weightStatsPerNodeRef = new AtomicReference<>(Map.of()); + private final AtomicReference> allocationStatsPerNodeRef = new AtomicReference<>(Map.of()); - public void updateMetrics(AllocationStats allocationStats, Map weightStatsPerNode) { + public void updateMetrics( + AllocationStats allocationStats, + Map weightStatsPerNode, + Map 
nodeAllocationStats + ) { assert allocationStats != null : "allocation stats cannot be null"; assert weightStatsPerNode != null : "node balance weight stats cannot be null"; if (allocationStats != EMPTY_ALLOCATION_STATS) { @@ -66,6 +83,7 @@ public void updateMetrics(AllocationStats allocationStats, Map getDesiredBalanceNodeShardCountMetrics() { return values; } + private List getCurrentNodeDiskUsageMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List values = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + values.add(new LongWithAttributes(stats.get(node).currentDiskUsage(), getNodeAttributes(node))); + } + return values; + } + + private List getCurrentNodeWriteLoadMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).forecastedIngestLoad(), getNodeAttributes(node))); + } + return doubles; + } + + private List getCurrentNodeShardCountMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List values = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + values.add(new LongWithAttributes(stats.get(node).shards(), getNodeAttributes(node))); + } + return values; + } + + private List getCurrentNodeForecastedDiskUsageMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List values = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + values.add(new LongWithAttributes(stats.get(node).forecastedDiskUsage(), getNodeAttributes(node))); + } + return values; + } + + private List getCurrentNodeUndesiredShardCountMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List values = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + values.add(new LongWithAttributes(stats.get(node).undesiredShards(), getNodeAttributes(node))); + } + return values; + } + private Map getNodeAttributes(DiscoveryNode node) { return Map.of("node_id", node.getId(), "node_name", node.getName()); } @@ -216,5 +324,6 @@ public void zeroAllMetrics() { totalAllocations = 0; undesiredAllocations = 0; weightStatsPerNodeRef.set(Map.of()); + allocationStatsPerNodeRef.set(Map.of()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 129144a3d734b..5ad29debc8f20 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -20,6 +20,8 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats; import 
org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -34,7 +36,9 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Comparator; +import java.util.HashMap; import java.util.Iterator; +import java.util.Map; import java.util.Set; import java.util.function.BiFunction; import java.util.stream.Collectors; @@ -71,8 +75,14 @@ public class DesiredBalanceReconciler { private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); private final DesiredBalanceMetrics desiredBalanceMetrics; - - public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, DesiredBalanceMetrics desiredBalanceMetrics) { + private final NodeAllocationStatsProvider nodeAllocationStatsProvider; + + public DesiredBalanceReconciler( + ClusterSettings clusterSettings, + ThreadPool threadPool, + DesiredBalanceMetrics desiredBalanceMetrics, + NodeAllocationStatsProvider nodeAllocationStatsProvider + ) { this.desiredBalanceMetrics = desiredBalanceMetrics; this.undesiredAllocationLogInterval = new FrequencyCappedAction( threadPool.relativeTimeInMillisSupplier(), @@ -83,6 +93,7 @@ public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool thre UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, value -> this.undesiredAllocationsLogThreshold = value ); + this.nodeAllocationStatsProvider = nodeAllocationStatsProvider; } public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { @@ -143,8 +154,20 @@ void run() { logger.debug("Reconciliation is complete"); - desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode()); + updateDesireBalanceMetrics(allocationStats); + } + } + + private void updateDesireBalanceMetrics(AllocationStats allocationStats) { + var stats = nodeAllocationStatsProvider.stats(allocation.getClusterState(), allocation.clusterInfo(), desiredBalance); + Map nodeAllocationStats = new HashMap<>(stats.size()); + for (var entry : stats.entrySet()) { + var node = allocation.nodes().get(entry.getKey()); + if (node != null) { + nodeAllocationStats.put(node, entry.getValue()); + } } + desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode(), nodeAllocationStats); } private boolean allocateUnassignedInvariant() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 5ccb59e29d7dc..bfe8a20f18043 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; @@ -85,15 +86,17 @@ public DesiredBalanceShardsAllocator( ThreadPool threadPool, ClusterService clusterService, DesiredBalanceReconcilerAction 
reconciler, - TelemetryProvider telemetryProvider + TelemetryProvider telemetryProvider, + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { this( delegateAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator), + new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator), reconciler, - telemetryProvider + telemetryProvider, + nodeAllocationStatsProvider ); } @@ -103,7 +106,8 @@ public DesiredBalanceShardsAllocator( ClusterService clusterService, DesiredBalanceComputer desiredBalanceComputer, DesiredBalanceReconcilerAction reconciler, - TelemetryProvider telemetryProvider + TelemetryProvider telemetryProvider, + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { this.desiredBalanceMetrics = new DesiredBalanceMetrics(telemetryProvider.getMeterRegistry()); this.delegateAllocator = delegateAllocator; @@ -113,7 +117,8 @@ public DesiredBalanceShardsAllocator( this.desiredBalanceReconciler = new DesiredBalanceReconciler( clusterService.getClusterSettings(), threadPool, - desiredBalanceMetrics + desiredBalanceMetrics, + nodeAllocationStatsProvider ); this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) { diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java new file mode 100644 index 0000000000000..7ea58ee326a79 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common; + +import java.nio.ByteBuffer; +import java.util.function.Supplier; + +/** + * Generates a base64-encoded, k-ordered UUID string optimized for compression and efficient indexing. + *

+ * This method produces a time-based UUID where slowly changing components like the timestamp appear first, + * improving prefix-sharing and compression during indexing. It ensures uniqueness across nodes by incorporating + * a timestamp, a MAC address, and a sequence ID. + *

+ * Timestamp: Represents the current time in milliseconds, ensuring ordering and uniqueness. + *
+ * MAC Address: Ensures uniqueness across different coordinators. + *
+ * Sequence ID: Differentiates UUIDs generated within the same millisecond, ensuring uniqueness even at high throughput. + *
+ * The result is a compact base64-encoded string, optimized for efficient compression of the _id field in an inverted index. + */ +public class TimeBasedKOrderedUUIDGenerator extends TimeBasedUUIDGenerator { + + public TimeBasedKOrderedUUIDGenerator( + final Supplier timestampSupplier, + final Supplier sequenceIdSupplier, + final Supplier macAddressSupplier + ) { + super(timestampSupplier, sequenceIdSupplier, macAddressSupplier); + } + + @Override + public String getBase64UUID() { + final int sequenceId = sequenceNumber.incrementAndGet() & 0x00FF_FFFF; + + // Calculate timestamp to ensure ordering and avoid backward movement in case of time shifts. + // Uses AtomicLong to guarantee that timestamp increases even if the system clock moves backward. + // If the sequenceId overflows (reaches 0 within the same millisecond), the timestamp is incremented + // to ensure strict ordering. + long timestamp = this.lastTimestamp.accumulateAndGet( + timestampSupplier.get(), + sequenceId == 0 ? (lastTimestamp, currentTimeMillis) -> Math.max(lastTimestamp, currentTimeMillis) + 1 : Math::max + ); + + final byte[] uuidBytes = new byte[15]; + final ByteBuffer buffer = ByteBuffer.wrap(uuidBytes); + + buffer.put((byte) (timestamp >>> 40)); // changes every 35 years + buffer.put((byte) (timestamp >>> 32)); // changes every ~50 days + buffer.put((byte) (timestamp >>> 24)); // changes every ~4.5h + buffer.put((byte) (timestamp >>> 16)); // changes every ~65 secs + + // MAC address of the coordinator might change if there are many coordinators in the cluster + // and the indexing api does not necessarily target the same coordinator. + byte[] macAddress = macAddress(); + assert macAddress.length == 6; + buffer.put(macAddress, 0, macAddress.length); + + buffer.put((byte) (sequenceId >>> 16)); + + // From hereinafter everything is almost like random and does not compress well + // due to unlikely prefix-sharing + buffer.put((byte) (timestamp >>> 8)); + buffer.put((byte) (sequenceId >>> 8)); + buffer.put((byte) timestamp); + buffer.put((byte) sequenceId); + + assert buffer.position() == uuidBytes.length; + + return Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(uuidBytes); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java index 73528ed0d3866..9da878fd4af64 100644 --- a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java @@ -11,6 +11,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; /** * These are essentially flake ids but we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. We also reorder @@ -19,15 +20,14 @@ * For more information about flake ids, check out * https://archive.fo/2015.07.08-082503/http://www.boundary.com/blog/2012/01/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang/ */ - class TimeBasedUUIDGenerator implements UUIDGenerator { // We only use bottom 3 bytes for the sequence number. 
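A quick illustration of the byte layout described above. This is an editor's sketch, not part of the diff: it assumes a caller in the org.elasticsearch.common package (the parent class is package-private) and uses made-up supplier values; production code would go through UUIDs.base64TimeBasedKOrderedUUID() instead.

import java.util.function.Supplier;

class KOrderedIdSketch {
    static void demo() {
        Supplier<Long> fixedClock = () -> 1_700_000_000_000L;              // deterministic timestamp
        Supplier<Integer> seqSeed = () -> 0;                               // deterministic sequence seed
        Supplier<byte[]> fakeMac = () -> new byte[] { 0, 1, 2, 3, 4, 5 };  // must be 6 bytes

        var gen = new TimeBasedKOrderedUUIDGenerator(fixedClock, seqSeed, fakeMac);
        String first = gen.getBase64UUID();   // 15 bytes -> 20 url-safe base64 chars, no padding
        String second = gen.getBase64UUID();
        // With a fixed clock, the two ids agree on the leading 14 bytes (timestamp bytes,
        // MAC, and high sequence bytes) and differ only in the trailing sequence byte,
        // which is exactly the prefix-sharing the javadoc above describes.
        assert first.substring(0, 18).equals(second.substring(0, 18));
    }
}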
Paranoia: init with random int so that if JVM/OS/machine goes down, clock slips // backwards, and JVM comes back up, we are less likely to be on the same sequenceNumber at the same time: - private final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); + protected final AtomicInteger sequenceNumber; + protected final AtomicLong lastTimestamp; - // Used to ensure clock moves forward: - private final AtomicLong lastTimestamp = new AtomicLong(0); + protected final Supplier timestampSupplier; private static final byte[] SECURE_MUNGED_ADDRESS = MacAddressProvider.getSecureMungedAddress(); @@ -35,18 +35,26 @@ class TimeBasedUUIDGenerator implements UUIDGenerator { assert SECURE_MUNGED_ADDRESS.length == 6; } - // protected for testing - protected long currentTimeMillis() { - return System.currentTimeMillis(); + static final int SIZE_IN_BYTES = 15; + private final byte[] macAddress; + + TimeBasedUUIDGenerator( + final Supplier timestampSupplier, + final Supplier sequenceIdSupplier, + final Supplier macAddressSupplier + ) { + this.timestampSupplier = timestampSupplier; + // NOTE: getting the mac address every time using the supplier is expensive, hence we cache it. + this.macAddress = macAddressSupplier.get(); + this.sequenceNumber = new AtomicInteger(sequenceIdSupplier.get()); + // Used to ensure clock moves forward: + this.lastTimestamp = new AtomicLong(0); } - // protected for testing protected byte[] macAddress() { - return SECURE_MUNGED_ADDRESS; + return macAddress; } - static final int SIZE_IN_BYTES = 15; - @Override public String getBase64UUID() { final int sequenceId = sequenceNumber.incrementAndGet() & 0xffffff; @@ -55,7 +63,7 @@ public String getBase64UUID() { // still vulnerable if we are shut down, clock goes backwards, and we restart... for this we // randomize the sequenceNumber on init to decrease chance of collision: long timestamp = this.lastTimestamp.accumulateAndGet( - currentTimeMillis(), + timestampSupplier.get(), // Always force the clock to increment whenever sequence number is 0, in case we have a long // time-slip backwards: sequenceId == 0 ? (lastTimestamp, currentTimeMillis) -> Math.max(lastTimestamp, currentTimeMillis) + 1 : Math::max diff --git a/server/src/main/java/org/elasticsearch/common/UUIDs.java b/server/src/main/java/org/elasticsearch/common/UUIDs.java index 61ee4bd5d64ab..ebcb375bc01bc 100644 --- a/server/src/main/java/org/elasticsearch/common/UUIDs.java +++ b/server/src/main/java/org/elasticsearch/common/UUIDs.java @@ -12,11 +12,29 @@ import org.elasticsearch.common.settings.SecureString; import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +/** + * Utility class for generating various types of UUIDs. 
+ */ public class UUIDs { + private static final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); + public static final Supplier DEFAULT_TIMESTAMP_SUPPLIER = System::currentTimeMillis; + public static final Supplier DEFAULT_SEQUENCE_ID_SUPPLIER = sequenceNumber::incrementAndGet; + public static final Supplier DEFAULT_MAC_ADDRESS_SUPPLIER = MacAddressProvider::getSecureMungedAddress; + private static final UUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); + private static final UUIDGenerator TIME_BASED_K_ORDERED_GENERATOR = new TimeBasedKOrderedUUIDGenerator( + DEFAULT_TIMESTAMP_SUPPLIER, + DEFAULT_SEQUENCE_ID_SUPPLIER, + DEFAULT_MAC_ADDRESS_SUPPLIER + ); - private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); - private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator(); + private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator( + DEFAULT_TIMESTAMP_SUPPLIER, + DEFAULT_SEQUENCE_ID_SUPPLIER, + DEFAULT_MAC_ADDRESS_SUPPLIER + ); /** * The length of a UUID string generated by {@link #base64UUID}. @@ -33,6 +51,14 @@ public static String base64UUID() { return TIME_UUID_GENERATOR.getBase64UUID(); } + public static String base64TimeBasedKOrderedUUID() { + return TIME_BASED_K_ORDERED_GENERATOR.getBase64UUID(); + } + + public static String base64TimeBasedUUID() { + return TIME_UUID_GENERATOR.getBase64UUID(); + } + /** * The length of a UUID string generated by {@link #randomBase64UUID} and {@link #randomBase64UUIDSecureString}. */ diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index fc3674a6016aa..48a764826bad2 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -10,10 +10,7 @@ package org.elasticsearch.common.time; import org.elasticsearch.common.Strings; -import org.elasticsearch.core.Booleans; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.logging.internal.spi.LoggerFactory; import java.time.Instant; import java.time.LocalDate; @@ -45,31 +42,9 @@ import static java.time.temporal.ChronoField.MONTH_OF_YEAR; import static java.time.temporal.ChronoField.NANO_OF_SECOND; import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; -import static org.elasticsearch.common.util.ArrayUtils.prepend; public class DateFormatters { - /** - * The ISO8601 parser is as close as possible to the java.time based parsers, but there are some strings - * that are no longer accepted (multiple fractional seconds, or multiple timezones) by the ISO parser. - * If a string cannot be parsed by the ISO parser, it then tries the java.time one. - * If there's lots of these strings, trying the ISO parser, then the java.time parser, might cause a performance drop. - * So provide a JVM option so that users can just use the java.time parsers, if they really need to. - *
- * Note that this property is sometimes set by {@code ESTestCase.setTestSysProps} to flip between implementations in tests, - * to ensure both are fully tested - */ - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // evaluate if we need to deprecate/remove this - private static final boolean JAVA_TIME_PARSERS_ONLY = Booleans.parseBoolean(System.getProperty("es.datetime.java_time_parsers"), false); - - static { - // when this is used directly in tests ES logging may not have been initialized yet - LoggerFactory logger; - if (JAVA_TIME_PARSERS_ONLY && (logger = LoggerFactory.provider()) != null) { - logger.getLogger(DateFormatters.class).info("Using java.time datetime parsers only"); - } - } - private static DateFormatter newDateFormatter(String format, DateTimeFormatter formatter) { return new JavaDateFormatter(format, new JavaTimeDateTimePrinter(formatter), new JavaTimeDateTimeParser(formatter)); } @@ -159,81 +134,14 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p .toFormatter(Locale.ROOT) .withResolverStyle(ResolverStyle.STRICT); - private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER = new DateTimeFormatterBuilder().append( - STRICT_YEAR_MONTH_DAY_FORMATTER - ) - .optionalStart() - .appendLiteral('T') - .optionalStart() - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendFraction(NANO_OF_SECOND, 1, 9, true) - .optionalEnd() - .optionalStart() - .appendLiteral(',') - .appendFraction(NANO_OF_SECOND, 1, 9, false) - .optionalEnd() - .optionalEnd() - .optionalEnd() - .optionalStart() - .appendZoneOrOffsetId() - .optionalEnd() - .optionalStart() - .append(TIME_ZONE_FORMATTER_NO_COLON) - .optionalEnd() - .optionalEnd() - .optionalEnd() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT); - /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional. */ - private static final DateFormatter STRICT_DATE_OPTIONAL_TIME; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(STRICT_DATE_OPTIONAL_TIME_FORMATTER); - - STRICT_DATE_OPTIONAL_TIME = new JavaDateFormatter( - "strict_date_optional_time", - new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), - JAVA_TIME_PARSERS_ONLY - ? 
new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale( - Locale.ROOT - ), - javaTimeParser } - ); - } - - private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS = new DateTimeFormatterBuilder().append( - STRICT_YEAR_MONTH_DAY_FORMATTER - ) - .optionalStart() - .appendLiteral('T') - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - .optionalStart() - .appendFraction(NANO_OF_SECOND, 1, 9, true) - .optionalEnd() - .optionalStart() - .appendLiteral(',') - .appendFraction(NANO_OF_SECOND, 1, 9, false) - .optionalEnd() - .optionalStart() - .appendZoneOrOffsetId() - .optionalEnd() - .optionalStart() - .append(TIME_ZONE_FORMATTER_NO_COLON) - .optionalEnd() - .optionalEnd() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT); + private static final DateFormatter STRICT_DATE_OPTIONAL_TIME = new JavaDateFormatter( + "strict_date_optional_time", + new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), + new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale(Locale.ROOT) + ); private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS = new DateTimeFormatterBuilder().append( STRICT_YEAR_MONTH_DAY_PRINTER @@ -262,79 +170,28 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional with nanosecond resolution. */ - private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS); - - STRICT_DATE_OPTIONAL_TIME_NANOS = new JavaDateFormatter( - "strict_date_optional_time_nanos", - new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), - true, - null, - DecimalSeparator.BOTH, - TimezonePresence.OPTIONAL - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = new JavaDateFormatter( + "strict_date_optional_time_nanos", + new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS), + new Iso8601DateTimeParser( + Set.of(HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + true, + null, + DecimalSeparator.BOTH, + TimezonePresence.OPTIONAL + ).withLocale(Locale.ROOT) + ); /** * Returns a ISO 8601 compatible date time formatter and parser. 
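To make the simplification concrete, here is an editor's sketch (not part of the diff) of how these formatters are consumed; it assumes the public DateFormatter/DateFormatters entry points, and the input string is invented.

// "strict_date_optional_time" is now backed by the single Iso8601DateTimeParser,
// with no java.time fallback and no es.datetime.java_time_parsers escape hatch.
DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time");
java.time.temporal.TemporalAccessor accessor = formatter.parse("2024-11-12T10:15:30.123Z");
java.time.ZonedDateTime parsed = DateFormatters.from(accessor);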
* This is not fully compatible to the existing spec, which would require far more edge cases, but merely compatible with the * existing legacy joda time ISO date formatter */ - private static final DateFormatter ISO_8601; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .optionalStart() - .appendLiteral('T') - .optionalStart() - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendFraction(NANO_OF_SECOND, 1, 9, true) - .optionalEnd() - .optionalStart() - .appendLiteral(",") - .appendFraction(NANO_OF_SECOND, 1, 9, false) - .optionalEnd() - .optionalEnd() - .optionalEnd() - .optionalEnd() - .optionalStart() - .appendZoneOrOffsetId() - .optionalEnd() - .optionalStart() - .append(TIME_ZONE_FORMATTER_NO_COLON) - .optionalEnd() - .optionalEnd() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ); - - ISO_8601 = new JavaDateFormatter( - "iso8601", - new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale( - Locale.ROOT - ), - javaTimeParser } - ); - } + private static final DateFormatter ISO_8601 = new JavaDateFormatter( + "iso8601", + new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), + new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale(Locale.ROOT) + ); ///////////////////////////////////////// // @@ -755,53 +612,33 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p /* * A strict formatter that formats or parses a year and a month, such as '2011-12'. */ - private static final DateFormatter STRICT_YEAR_MONTH; - static { - DateTimeFormatter javaTimeFormatter = new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) - .appendLiteral("-") - .appendValue(MONTH_OF_YEAR, 2, 2, SignStyle.NOT_NEGATIVE) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT); - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(javaTimeFormatter); - - STRICT_YEAR_MONTH = new JavaDateFormatter( - "strict_year_month", - new JavaTimeDateTimePrinter(javaTimeFormatter), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR), - false, - MONTH_OF_YEAR, - DecimalSeparator.BOTH, - TimezonePresence.FORBIDDEN - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + private static final DateFormatter STRICT_YEAR_MONTH = new JavaDateFormatter( + "strict_year_month", + new JavaTimeDateTimePrinter( + new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) + .appendLiteral("-") + .appendValue(MONTH_OF_YEAR, 2, 2, SignStyle.NOT_NEGATIVE) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + new Iso8601DateTimeParser(Set.of(MONTH_OF_YEAR), false, MONTH_OF_YEAR, DecimalSeparator.BOTH, TimezonePresence.FORBIDDEN) + .withLocale(Locale.ROOT) + ); /* * A strict formatter that formats or parses a year, such as '2011'. 
*/ - private static final DateFormatter STRICT_YEAR; - static { - DateTimeFormatter javaTimeFormatter = new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT); - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(javaTimeFormatter); - - STRICT_YEAR = new JavaDateFormatter( - "strict_year", - new JavaTimeDateTimePrinter(javaTimeFormatter), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser(Set.of(), false, ChronoField.YEAR, DecimalSeparator.BOTH, TimezonePresence.FORBIDDEN) - .withLocale(Locale.ROOT), - javaTimeParser } - ); - } + private static final DateFormatter STRICT_YEAR = new JavaDateFormatter( + "strict_year", + new JavaTimeDateTimePrinter( + new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + new Iso8601DateTimeParser(Set.of(), false, ChronoField.YEAR, DecimalSeparator.BOTH, TimezonePresence.FORBIDDEN).withLocale( + Locale.ROOT + ) + ); /* * A strict formatter that formats or parses a hour, minute and second, such as '09:43:25'. @@ -832,39 +669,17 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * Returns a formatter that combines a full date and time, separated by a 'T' * (uuuu-MM-dd'T'HH:mm:ss.SSSZZ). */ - private static final DateFormatter STRICT_DATE_TIME; - static { - DateTimeParser[] javaTimeParsers = new DateTimeParser[] { - new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER) - .append(TIME_ZONE_FORMATTER_NO_COLON) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ) }; - - STRICT_DATE_TIME = new JavaDateFormatter( - "strict_date_time", - new JavaTimeDateTimePrinter(STRICT_DATE_PRINTER), - JAVA_TIME_PARSERS_ONLY - ? javaTimeParsers - : prepend( - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), - false, - null, - DecimalSeparator.DOT, - TimezonePresence.MANDATORY - ).withLocale(Locale.ROOT), - javaTimeParsers - ) - ); - } + private static final DateFormatter STRICT_DATE_TIME = new JavaDateFormatter( + "strict_date_time", + new JavaTimeDateTimePrinter(STRICT_DATE_PRINTER), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + null, + DecimalSeparator.DOT, + TimezonePresence.MANDATORY + ).withLocale(Locale.ROOT) + ); private static final DateTimeFormatter STRICT_ORDINAL_DATE_TIME_NO_MILLIS_BASE = new DateTimeFormatterBuilder().appendValue( ChronoField.YEAR, @@ -907,44 +722,22 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * Returns a formatter that combines a full date and time without millis, * separated by a 'T' (uuuu-MM-dd'T'HH:mm:ssZZ). 
*/ - private static final DateFormatter STRICT_DATE_TIME_NO_MILLIS; - static { - DateTimeParser[] javaTimeParsers = new DateTimeParser[] { - new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .append(TIME_ZONE_FORMATTER_NO_COLON) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ) }; - - STRICT_DATE_TIME_NO_MILLIS = new JavaDateFormatter( - "strict_date_time_no_millis", - new JavaTimeDateTimePrinter( - new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .appendOffset("+HH:MM", "Z") - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - JAVA_TIME_PARSERS_ONLY - ? javaTimeParsers - : prepend( - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), - false, - SECOND_OF_MINUTE, - DecimalSeparator.BOTH, - TimezonePresence.MANDATORY - ).withLocale(Locale.ROOT), - javaTimeParsers - ) - ); - } + private static final DateFormatter STRICT_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + "strict_date_time_no_millis", + new JavaTimeDateTimePrinter( + new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) + .appendOffset("+HH:MM", "Z") + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + SECOND_OF_MINUTE, + DecimalSeparator.BOTH, + TimezonePresence.MANDATORY + ).withLocale(Locale.ROOT) + ); // NOTE: this is not a strict formatter to retain the joda time based behaviour, even though it's named like this private static final DateTimeFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER = new DateTimeFormatterBuilder().append( @@ -980,75 +773,41 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * two digit minute of hour, two digit second of minute, and three digit * fraction of second (uuuu-MM-dd'T'HH:mm:ss.SSS). */ - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser( + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter( + "strict_date_hour_minute_second_fraction", + new JavaTimeDateTimePrinter( new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(NANO_OF_SECOND, 1, 9, true) + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) .toFormatter(Locale.ROOT) .withResolverStyle(ResolverStyle.STRICT) - ); - - STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter( - "strict_date_hour_minute_second_fraction", - new JavaTimeDateTimePrinter( - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - JAVA_TIME_PARSERS_ONLY - ? 
new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), - false, - null, - DecimalSeparator.DOT, - TimezonePresence.FORBIDDEN - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + ), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), + false, + null, + DecimalSeparator.DOT, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT) + ); - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser( + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( + "strict_date_hour_minute_second_millis", + new JavaTimeDateTimePrinter( new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(NANO_OF_SECOND, 1, 9, true) + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) .toFormatter(Locale.ROOT) .withResolverStyle(ResolverStyle.STRICT) - ); - - STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( - "strict_date_hour_minute_second_millis", - new JavaTimeDateTimePrinter( - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), - false, - null, - DecimalSeparator.DOT, - TimezonePresence.FORBIDDEN - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + ), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), + false, + null, + DecimalSeparator.DOT, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT) + ); /* * Returns a formatter for a two digit hour of day. (HH) @@ -1362,27 +1121,17 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * two digit minute of hour, and two digit second of * minute. (uuuu-MM-dd'T'HH:mm:ss) */ - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND; - static { - DateTimeFormatter javaTimeFormatter = DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss", Locale.ROOT); - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(javaTimeFormatter); - - STRICT_DATE_HOUR_MINUTE_SECOND = new JavaDateFormatter( - "strict_date_hour_minute_second", - new JavaTimeDateTimePrinter(javaTimeFormatter), - JAVA_TIME_PARSERS_ONLY - ? 
new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), - false, - SECOND_OF_MINUTE, - DecimalSeparator.BOTH, - TimezonePresence.FORBIDDEN - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND = new JavaDateFormatter( + "strict_date_hour_minute_second", + new JavaTimeDateTimePrinter(DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss", Locale.ROOT)), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + SECOND_OF_MINUTE, + DecimalSeparator.BOTH, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT) + ); /* * A basic formatter for a full date as four digit year, two digit diff --git a/server/src/main/java/org/elasticsearch/common/time/TimeProvider.java b/server/src/main/java/org/elasticsearch/common/time/TimeProvider.java new file mode 100644 index 0000000000000..8b29d23397383 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/TimeProvider.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common.time; + +/** + * An interface encapsulating the different methods for getting relative and absolute time. The main + * implementation of this is {@link org.elasticsearch.threadpool.ThreadPool}. To make it clear that a + * {@code ThreadPool} is being passed around only to get time, it is preferred to use this interface. + */ +public interface TimeProvider { + + /** + * Returns a value of milliseconds that may be used for relative time calculations. + * + * This method should only be used for calculating time deltas. For an epoch based + * timestamp, see {@link #absoluteTimeInMillis()}. + */ + long relativeTimeInMillis(); + + /** + * Returns a value of nanoseconds that may be used for relative time calculations. + * + * This method should only be used for calculating time deltas. For an epoch based + * timestamp, see {@link #absoluteTimeInMillis()}. + */ + long relativeTimeInNanos(); + + /** + * Returns a value of milliseconds that may be used for relative time calculations. Similar to {@link #relativeTimeInMillis()} except + * that this method is more expensive: the return value is computed directly from {@link System#nanoTime} and is not cached. You should + * use {@link #relativeTimeInMillis()} unless the extra accuracy offered by this method is worth the costs. + * + * When computing a time interval by comparing relative times in milliseconds, you should make sure that both endpoints use cached + * values returned from {@link #relativeTimeInMillis()} or that they both use raw values returned from this method. It doesn't really + * make sense to compare a raw value to a cached value, even if in practice the result of such a comparison will be approximately + * sensible. + */ + long rawRelativeTimeInMillis(); + + /** + * Returns the value of milliseconds since UNIX epoch. + * + * This method should only be used for exact date/time formatting. 
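Since the DesiredBalanceComputer hunk earlier in this diff now passes the ThreadPool itself where only time is needed, here is an editor's sketch (not part of the diff) of the substitutability this interface buys, e.g. a fixed clock for tests:

TimeProvider fixedClock = new TimeProvider() {
    @Override public long relativeTimeInMillis()    { return 42L; }
    @Override public long relativeTimeInNanos()     { return 42_000_000L; }
    @Override public long rawRelativeTimeInMillis() { return 42L; }
    @Override public long absoluteTimeInMillis()    { return 1_700_000_000_000L; } // epoch millis
};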
For calculating + * time deltas that should not suffer from negative deltas, which are possible with + * this method, see {@link #relativeTimeInMillis()}. + */ + long absoluteTimeInMillis(); +} diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index a7baca59e1857..bf2387453145d 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -300,7 +300,7 @@ static Metadata upgradeMetadata(Metadata metadata, IndexMetadataVerifier indexMe upgradedMetadata.put(newMetadata, false); } // upgrade current templates - if (applyPluginUpgraders( + if (applyPluginTemplateUpgraders( metadata.getTemplates(), metadataUpgrader.indexTemplateMetadataUpgraders, upgradedMetadata::removeTemplate, @@ -308,10 +308,23 @@ static Metadata upgradeMetadata(Metadata metadata, IndexMetadataVerifier indexMe )) { changed = true; } + // upgrade custom metadata + for (Map.Entry> entry : metadataUpgrader.customMetadataUpgraders.entrySet()) { + String type = entry.getKey(); + Function upgrader = entry.getValue(); + Metadata.Custom original = metadata.custom(type); + if (original != null) { + Metadata.Custom upgraded = upgrader.apply(original); + if (upgraded.equals(original) == false) { + upgradedMetadata.putCustom(type, upgraded); + changed = true; + } + } + } return changed ? upgradedMetadata.build() : metadata; } - private static boolean applyPluginUpgraders( + private static boolean applyPluginTemplateUpgraders( Map existingData, UnaryOperator> upgrader, Consumer removeData, diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index e6339344b6e5f..f5f923f3657f8 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -23,7 +23,6 @@ import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.DocumentDimensions; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -33,6 +32,8 @@ import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.RoutingFields; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; @@ -111,8 +112,8 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return DocumentDimensions.Noop.INSTANCE; + public RoutingFields buildRoutingFields(IndexSettings settings) { + return RoutingFields.Noop.INSTANCE; } @Override @@ -209,9 +210,9 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { + public RoutingFields buildRoutingFields(IndexSettings settings) { IndexRouting.ExtractFromSource routing = 
(IndexRouting.ExtractFromSource) settings.getIndexRouting(); - return new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(routing.builder()); + return new RoutingPathFields(routing.builder()); } @Override @@ -287,8 +288,8 @@ public MetadataFieldMapper timeSeriesRoutingHashFieldMapper() { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return DocumentDimensions.Noop.INSTANCE; + public RoutingFields buildRoutingFields(IndexSettings settings) { + return RoutingFields.Noop.INSTANCE; } @Override @@ -368,8 +369,8 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return DocumentDimensions.Noop.INSTANCE; + public RoutingFields buildRoutingFields(IndexSettings settings) { + return RoutingFields.Noop.INSTANCE; } @Override @@ -524,7 +525,7 @@ public String getName() { /** * How {@code time_series_dimension} fields are handled by indices in this mode. */ - public abstract DocumentDimensions buildDocumentDimensions(IndexSettings settings); + public abstract RoutingFields buildRoutingFields(IndexSettings settings); /** * @return Whether timestamps should be validated for being withing the time range of an index. diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 4ff7ef60cc0a2..64182b000827d 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -167,7 +167,7 @@ public interface DirectoryWrapper { private final Map> similarities = new HashMap<>(); private final Map directoryFactories; private final SetOnce> forceQueryCacheProvider = new SetOnce<>(); - private final List searchOperationListeners = new ArrayList<>(); + private final List searchOperationListeners; private final List indexOperationListeners = new ArrayList<>(); private final IndexNameExpressionResolver expressionResolver; private final AtomicBoolean frozen = new AtomicBoolean(false); @@ -194,11 +194,14 @@ public IndexModule( final IndexNameExpressionResolver expressionResolver, final Map recoveryStateFactories, final SlowLogFieldProvider slowLogFieldProvider, - final MapperMetrics mapperMetrics + final MapperMetrics mapperMetrics, + final List searchOperationListeners ) { this.indexSettings = indexSettings; this.analysisRegistry = analysisRegistry; this.engineFactory = Objects.requireNonNull(engineFactory); + // Need to have a mutable arraylist for plugins to add listeners to it + this.searchOperationListeners = new ArrayList<>(searchOperationListeners); this.searchOperationListeners.add(new SearchSlowLog(indexSettings, slowLogFieldProvider)); this.indexOperationListeners.add(new IndexingSlowLog(indexSettings, slowLogFieldProvider)); this.directoryFactories = Collections.unmodifiableMap(directoryFactories); diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 440613263d441..5746bea12a2d8 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -130,8 +130,10 @@ private static Version parseUnchecked(String version) { public static final IndexVersion ENABLE_IGNORE_ABOVE_LOGSDB = def(8_517_00_0, Version.LUCENE_9_12_0); public static final IndexVersion ADD_ROLE_MAPPING_CLEANUP_MIGRATION = def(8_518_00_0, 
Version.LUCENE_9_12_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT_BACKPORT = def(8_519_00_0, Version.LUCENE_9_12_0); + public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID_BACKPORT = def(8_520_00_0, Version.LUCENE_9_12_0); public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0); + public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 2c1175648c219..91c4b780db0bd 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -189,7 +189,11 @@ public Analyzer getAnalyzer(String analyzer) throws IOException { } }); } - return analyzerProvider.get(environment, analyzer).get(); + + return overridePositionIncrementGap( + (NamedAnalyzer) analyzerProvider.get(environment, analyzer).get(), + TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + ); } @Override @@ -720,13 +724,8 @@ private static NamedAnalyzer produceAnalyzer( throw new IllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer"); } NamedAnalyzer analyzer; - if (analyzerF instanceof NamedAnalyzer) { - // if we got a named analyzer back, use it... - analyzer = (NamedAnalyzer) analyzerF; - if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) { - // unless the positionIncrementGap needs to be overridden - analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap); - } + if (analyzerF instanceof NamedAnalyzer namedAnalyzer) { + analyzer = overridePositionIncrementGap(namedAnalyzer, overridePositionIncrementGap); } else { analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap); } @@ -734,6 +733,13 @@ private static NamedAnalyzer produceAnalyzer( return analyzer; } + private static NamedAnalyzer overridePositionIncrementGap(NamedAnalyzer analyzer, int overridePositionIncrementGap) { + if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) { + analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap); + } + return analyzer; + } + private static void processNormalizerFactory( String name, AnalyzerProvider normalizerFactory, diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 5277999271984..59607fadc0dd9 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -341,5 +341,13 @@ public interface Listener { * @param accountable the bitsets ram representation */ void onRemoval(ShardId shardId, Accountable accountable); + + Listener NOOP = new Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) {} + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) 
{} + }; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 5aaaf7dce83c9..f74d58093a7f5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -499,7 +499,7 @@ private void indexValue(DocumentParserContext context, Boolean value) { } if (fieldType().isDimension()) { - context.getDimensions().addBoolean(fieldType().name(), value).validate(context.indexSettings()); + context.getRoutingFields().addBoolean(fieldType().name(), value); } if (indexed) { context.doc().add(new StringField(fieldType().name(), value ? Values.TRUE : Values.FALSE, Field.Store.NO)); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java deleted file mode 100644 index 8f26d21324d9b..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.index.mapper; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.index.IndexSettings; - -import java.net.InetAddress; - -/** - * Collects dimensions from documents. - */ -public interface DocumentDimensions { - - /** - * Build an index's DocumentDimensions using its settings - */ - static DocumentDimensions fromIndexSettings(IndexSettings indexSettings) { - return indexSettings.getMode().buildDocumentDimensions(indexSettings); - } - - /** - * This overloaded method tries to take advantage of the fact that the UTF-8 - * value is already computed in some cases when we want to collect - * dimensions, so we can save re-computing the UTF-8 encoding. 
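// Editor's note, summarizing the mapper hunks above and below (this block is not part
// of the diff). Every dimension-collecting call site changes shape the same way:
//   before: context.getDimensions().addBoolean(fieldType().name(), value).validate(context.indexSettings());
//   after:  context.getRoutingFields().addBoolean(fieldType().name(), value);
// The per-field validate() step disappears; for the legacy tsid path the dimension-field
// limit is instead enforced once in TimeSeriesIdFieldMapper#postParse (see the hunk near
// the end of this diff).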
- */ - DocumentDimensions addString(String fieldName, BytesRef utf8Value); - - default DocumentDimensions addString(String fieldName, String value) { - return addString(fieldName, new BytesRef(value)); - } - - DocumentDimensions addIp(String fieldName, InetAddress value); - - DocumentDimensions addLong(String fieldName, long value); - - DocumentDimensions addUnsignedLong(String fieldName, long value); - - DocumentDimensions addBoolean(String fieldName, boolean value); - - DocumentDimensions validate(IndexSettings settings); - - /** - * Noop implementation that doesn't perform validations on dimension fields - */ - enum Noop implements DocumentDimensions { - - INSTANCE; - - @Override - public DocumentDimensions addString(String fieldName, BytesRef utf8Value) { - return this; - } - - @Override - public DocumentDimensions addString(String fieldName, String value) { - return this; - } - - @Override - public DocumentDimensions addIp(String fieldName, InetAddress value) { - return this; - } - - @Override - public DocumentDimensions addLong(String fieldName, long value) { - return this; - } - - @Override - public DocumentDimensions addUnsignedLong(String fieldName, long value) { - return this; - } - - @Override - public DocumentDimensions addBoolean(String fieldName, boolean value) { - return this; - } - - @Override - public DocumentDimensions validate(IndexSettings settings) { - return this; - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index c84df68a637e2..51e4e9f4c1b5e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -126,7 +126,7 @@ private enum Scope { private final DynamicMapperSize dynamicMappersSize; private final Map dynamicObjectMappers; private final Map> dynamicRuntimeFields; - private final DocumentDimensions dimensions; + private final RoutingFields routingFields; private final ObjectMapper parent; private final ObjectMapper.Dynamic dynamic; private String id; @@ -158,7 +158,7 @@ private DocumentParserContext( String id, Field version, SeqNoFieldMapper.SequenceIDFields seqID, - DocumentDimensions dimensions, + RoutingFields routingFields, ObjectMapper parent, ObjectMapper.Dynamic dynamic, Set fieldsAppliedFromTemplates, @@ -178,7 +178,7 @@ private DocumentParserContext( this.id = id; this.version = version; this.seqID = seqID; - this.dimensions = dimensions; + this.routingFields = routingFields; this.parent = parent; this.dynamic = dynamic; this.fieldsAppliedFromTemplates = fieldsAppliedFromTemplates; @@ -201,7 +201,7 @@ private DocumentParserContext(ObjectMapper parent, ObjectMapper.Dynamic dynamic, in.id, in.version, in.seqID, - in.dimensions, + in.routingFields, parent, dynamic, in.fieldsAppliedFromTemplates, @@ -231,7 +231,7 @@ protected DocumentParserContext( null, null, SeqNoFieldMapper.SequenceIDFields.emptySeqID(), - DocumentDimensions.fromIndexSettings(mappingParserContext.getIndexSettings()), + RoutingFields.fromIndexSettings(mappingParserContext.getIndexSettings()), parent, dynamic, new HashSet<>(), @@ -762,8 +762,8 @@ public XContentParser parser() { /** * The collection of dimensions for this document. 
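An editor's sketch (not part of the diff) of how the renamed accessor's return value is meant to be used; indexSettings and the field names/values here are placeholders:

RoutingFields fields = RoutingFields.fromIndexSettings(indexSettings);
// Time-series indices get a RoutingPathFields wired to the index routing builder;
// the other index modes get RoutingFields.Noop.INSTANCE, so these calls are free.
fields.addString("host.name", "web-01")   // each add* returns this, so calls chain
    .addLong("shard", 3L)
    .addBoolean("canary", false);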
*/ - public DocumentDimensions getDimensions() { - return dimensions; + public RoutingFields getRoutingFields() { + return routingFields; } public abstract ContentPath path(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 2efeeba893c6c..09f44f139d8bc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -549,7 +549,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio private void indexValue(DocumentParserContext context, InetAddress address) { if (dimension) { - context.getDimensions().addIp(fieldType().name(), address).validate(context.indexSettings()); + context.getRoutingFields().addIp(fieldType().name(), address); } if (indexed) { Field field = new InetAddressPoint(fieldType().name(), address); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index ecc708bc94614..32aa422b18bcc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -944,7 +944,7 @@ private void indexValue(DocumentParserContext context, String value) { final BytesRef binaryValue = new BytesRef(value); if (fieldType().isDimension()) { - context.getDimensions().addString(fieldType().name(), binaryValue).validate(context.indexSettings()); + context.getRoutingFields().addString(fieldType().name(), binaryValue); } // If the UTF8 encoding of the field value is bigger than the max length 32766, Lucene fill fail the indexing request and, to diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 55ed1e10428aa..8c21dfea31b9a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -1991,7 +1991,7 @@ public Number value(XContentParser parser) throws IllegalArgumentException, IOEx */ public void indexValue(DocumentParserContext context, Number numericValue) { if (dimension && numericValue != null) { - context.getDimensions().addLong(fieldType().name(), numericValue.longValue()).validate(context.indexSettings()); + context.getRoutingFields().addLong(fieldType().name(), numericValue.longValue()); } fieldType().type.addFields(context.doc(), fieldType().name(), numericValue, indexed, hasDocValues, stored); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java new file mode 100644 index 0000000000000..4d8d8fdcbd296 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.IndexSettings; + +import java.net.InetAddress; + +/** + * Collects fields contributing to routing from documents. + */ +public interface RoutingFields { + + /** + * Collect routing fields from index settings + */ + static RoutingFields fromIndexSettings(IndexSettings indexSettings) { + return indexSettings.getMode().buildRoutingFields(indexSettings); + } + + /** + * This overloaded method tries to take advantage of the fact that the UTF-8 + * value is already computed in some cases when we want to collect + * routing fields, so we can save re-computing the UTF-8 encoding. + */ + RoutingFields addString(String fieldName, BytesRef utf8Value); + + default RoutingFields addString(String fieldName, String value) { + return addString(fieldName, new BytesRef(value)); + } + + RoutingFields addIp(String fieldName, InetAddress value); + + RoutingFields addLong(String fieldName, long value); + + RoutingFields addUnsignedLong(String fieldName, long value); + + RoutingFields addBoolean(String fieldName, boolean value); + + /** + * Noop implementation that doesn't perform validations on routing fields + */ + enum Noop implements RoutingFields { + + INSTANCE; + + @Override + public RoutingFields addString(String fieldName, BytesRef utf8Value) { + return this; + } + + @Override + public RoutingFields addString(String fieldName, String value) { + return this; + } + + @Override + public RoutingFields addIp(String fieldName, InetAddress value) { + return this; + } + + @Override + public RoutingFields addLong(String fieldName, long value) { + return this; + } + + @Override + public RoutingFields addUnsignedLong(String fieldName, long value) { + return this; + } + + @Override + public RoutingFields addBoolean(String fieldName, boolean value) { + return this; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java new file mode 100644 index 0000000000000..73baca1bf3fdb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java @@ -0,0 +1,269 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; +import org.elasticsearch.cluster.routing.IndexRouting; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.hash.Murmur3Hasher; +import org.elasticsearch.common.hash.MurmurHash3; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.util.ByteUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.search.DocValueFormat; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +/** + * Implementation of routing fields, using field matching based on the routing path content. + */ +public final class RoutingPathFields implements RoutingFields { + + private static final int SEED = 0; + + private static final int MAX_ROUTING_FIELDS = 512; + + private static final int MAX_HASH_LEN_BYTES = 2; + static { + assert MAX_HASH_LEN_BYTES == StreamOutput.putVInt(new byte[2], hashLen(MAX_ROUTING_FIELDS), 0); + } + + /** + * A map of the serialized values of routing fields that will be used + * for generating the _tsid field. The map will be used by {@link RoutingPathFields} + * to build the _tsid field for the document. + */ + private final SortedMap> routingValues = new TreeMap<>(); + + /** + * Builds the routing. Used for building {@code _id}. If null then skipped. + */ + @Nullable + private final IndexRouting.ExtractFromSource.Builder routingBuilder; + + public RoutingPathFields(@Nullable IndexRouting.ExtractFromSource.Builder routingBuilder) { + this.routingBuilder = routingBuilder; + } + + SortedMap> routingValues() { + return Collections.unmodifiableSortedMap(routingValues); + } + + IndexRouting.ExtractFromSource.Builder routingBuilder() { + return routingBuilder; + } + + /** + * Here we build the hash of the routing values using a similarity function so that we have a result + * with the following pattern: + * + * hash128(concatenate(routing field names)) + + * foreach(routing field value, limit = MAX_ROUTING_FIELDS) { hash32(routing field value) } + + * hash128(concatenate(routing field values)) + * + * The idea is to be able to place 'similar' values close to each other. 
+ */ + public BytesReference buildHash() { + Murmur3Hasher hasher = new Murmur3Hasher(SEED); + + // NOTE: hash all routing field names + int numberOfFields = Math.min(MAX_ROUTING_FIELDS, routingValues.size()); + int len = hashLen(numberOfFields); + // either one or two bytes are occupied by the vint since we're bounded by #MAX_ROUTING_FIELDS + byte[] hash = new byte[MAX_HASH_LEN_BYTES + len]; + int index = StreamOutput.putVInt(hash, len, 0); + + hasher.reset(); + for (final BytesRef name : routingValues.keySet()) { + hasher.update(name.bytes); + } + index = writeHash128(hasher.digestHash(), hash, index); + + // NOTE: concatenate all routing field value hashes up to a certain number of fields + int startIndex = index; + for (final List values : routingValues.values()) { + if ((index - startIndex) >= 4 * numberOfFields) { + break; + } + assert values.isEmpty() == false : "routing values are empty"; + final BytesRef routingValue = values.get(0).toBytesRef(); + ByteUtils.writeIntLE( + StringHelper.murmurhash3_x86_32(routingValue.bytes, routingValue.offset, routingValue.length, SEED), + hash, + index + ); + index += 4; + } + + // NOTE: hash all routing field allValues + hasher.reset(); + for (final List values : routingValues.values()) { + for (BytesReference v : values) { + hasher.update(v.toBytesRef().bytes); + } + } + index = writeHash128(hasher.digestHash(), hash, index); + + return new BytesArray(hash, 0, index); + } + + private static int hashLen(int numberOfFields) { + return 16 + 16 + 4 * numberOfFields; + } + + private static int writeHash128(final MurmurHash3.Hash128 hash128, byte[] buffer, int index) { + ByteUtils.writeLongLE(hash128.h1, buffer, index); + index += 8; + ByteUtils.writeLongLE(hash128.h2, buffer, index); + index += 8; + return index; + } + + @Override + public RoutingFields addString(String fieldName, BytesRef utf8Value) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.write((byte) 's'); + /* + * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish + * so it's easier for folks to reason about the space taken up. Mostly + * it'll be smaller too. 
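// Editor's sketch of the encoding described above (not part of the diff; the field
// names and values are invented). Each value is serialized with a leading type tag
// ('s' string, 'l' long, 'u' shifted unsigned long, 'b' boolean), and buildHash()
// emits: vint(len) | hash128(field names) | one 32-bit hash per value, bounded by
// MAX_ROUTING_FIELDS | hash128(all values).
RoutingPathFields fields = new RoutingPathFields(null); // null: no _id routing builder
fields.addString("namespace", "workloads");
fields.addLong("replica", 2L);
BytesRef tsid = fields.buildHash().toBytesRef(); // stable for equal field/value sets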
+             */
+            out.writeBytesRef(utf8Value);
+            add(fieldName, out.bytes());
+
+            if (routingBuilder != null) {
+                routingBuilder.addMatching(fieldName, utf8Value);
+            }
+        } catch (IOException e) {
+            throw new IllegalArgumentException("Routing field cannot be serialized.", e);
+        }
+        return this;
+    }
+
+    @Override
+    public RoutingFields addIp(String fieldName, InetAddress value) {
+        return addString(fieldName, NetworkAddress.format(value));
+    }
+
+    @Override
+    public RoutingFields addLong(String fieldName, long value) {
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            out.write((byte) 'l');
+            out.writeLong(value);
+            add(fieldName, out.bytes());
+        } catch (IOException e) {
+            throw new IllegalArgumentException("Routing field cannot be serialized.", e);
+        }
+        return this;
+    }
+
+    @Override
+    public RoutingFields addUnsignedLong(String fieldName, long value) {
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(value);
+            if (ul instanceof Long l) {
+                out.write((byte) 'l');
+                out.writeLong(l);
+            } else {
+                out.write((byte) 'u');
+                out.writeLong(value);
+            }
+            add(fieldName, out.bytes());
+            return this;
+        } catch (IOException e) {
+            throw new IllegalArgumentException("Routing field cannot be serialized.", e);
+        }
+    }
+
+    @Override
+    public RoutingFields addBoolean(String fieldName, boolean value) {
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            out.write((byte) 'b');
+            out.write(value ? 't' : 'f');
+            add(fieldName, out.bytes());
+        } catch (IOException e) {
+            throw new IllegalArgumentException("Routing field cannot be serialized.", e);
+        }
+        return this;
+    }
+
+    private void add(String fieldName, BytesReference encoded) throws IOException {
+        BytesRef name = new BytesRef(fieldName);
+        List<BytesReference> values = routingValues.get(name);
+        if (values == null) {
+            // optimize for the common case where routing fields are not multi-valued
+            routingValues.put(name, List.of(encoded));
+        } else {
+            if (values.size() == 1) {
+                // converts the immutable list that's optimized for the common case of having only one value to a mutable list
+                BytesReference previousValue = values.get(0);
+                values = new ArrayList<>(4);
+                values.add(previousValue);
+                routingValues.put(name, values);
+            }
+            values.add(encoded);
+        }
+    }
+
+    public static Map<String, Object> decodeAsMap(BytesRef bytesRef) {
+        try (StreamInput in = new BytesArray(bytesRef).streamInput()) {
+            int size = in.readVInt();
+            Map<String, Object> result = new LinkedHashMap<>(size);
+
+            for (int i = 0; i < size; i++) {
+                String name = null;
+                try {
+                    name = in.readSlicedBytesReference().utf8ToString();
+                } catch (AssertionError ae) {
+                    throw new IllegalArgumentException("Error parsing routing field: " + ae.getMessage(), ae);
+                }
+
+                int type = in.read();
+                switch (type) {
+                    case (byte) 's' -> {
+                        // parse a string
+                        try {
+                            result.put(name, in.readSlicedBytesReference().utf8ToString());
+                        } catch (AssertionError ae) {
+                            throw new IllegalArgumentException("Error parsing routing field: " + ae.getMessage(), ae);
+                        }
+                    }
+                    case (byte) 'l' -> // parse a long
+                        result.put(name, in.readLong());
+                    case (byte) 'u' -> { // parse an unsigned_long
+                        Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(in.readLong());
+                        result.put(name, ul);
+                    }
+                    case (byte) 'd' -> // parse a double
+                        result.put(name, in.readDouble());
+                    case (byte) 'b' -> // parse a boolean
+                        result.put(name, in.read() == 't');
+                    default -> throw new IllegalArgumentException("Cannot parse [" + name + "]: Unknown type [" + type + "]");
+                }
+            }
+            return result;
+        } catch (IOException | IllegalArgumentException e) {
+            throw new IllegalArgumentException("Routing field cannot be deserialized: " + e.getMessage(), e);
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java
index a6b2ad265decf..8af3c3e6ec270 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java
@@ -12,21 +12,11 @@
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.StringHelper;
-import org.elasticsearch.cluster.routing.IndexRouting;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.hash.Murmur3Hasher;
-import org.elasticsearch.common.hash.MurmurHash3;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.network.NetworkAddress;
-import org.elasticsearch.common.util.ByteUtils;
-import org.elasticsearch.core.Nullable;
 import org.elasticsearch.index.IndexMode;
-import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.fielddata.FieldData;
@@ -40,15 +30,10 @@
 import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
 
 import java.io.IOException;
-import java.net.InetAddress;
 import java.time.ZoneId;
-import java.util.ArrayList;
 import java.util.Collections;
-import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.SortedMap;
-import java.util.TreeMap;
 
 /**
  * Mapper for {@code _tsid} field included generated when the index is
@@ -136,15 +121,24 @@ private TimeSeriesIdFieldMapper() {
     public void postParse(DocumentParserContext context) throws IOException {
         assert fieldType().isIndexed() == false;
 
-        final TimeSeriesIdBuilder timeSeriesIdBuilder = (TimeSeriesIdBuilder) context.getDimensions();
-        final BytesRef timeSeriesId = getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ID_HASHING)
-            ? timeSeriesIdBuilder.buildLegacyTsid().toBytesRef()
-            : timeSeriesIdBuilder.buildTsidHash().toBytesRef();
+        final RoutingPathFields routingPathFields = (RoutingPathFields) context.getRoutingFields();
+        final BytesRef timeSeriesId;
+        if (getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ID_HASHING)) {
+            long limit = context.indexSettings().getValue(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING);
+            int size = routingPathFields.routingValues().size();
+            if (size > limit) {
+                throw new MapperException("Too many dimension fields [" + size + "], max [" + limit + "] dimension fields allowed");
+            }
+            timeSeriesId = buildLegacyTsid(routingPathFields).toBytesRef();
+        } else {
+            timeSeriesId = routingPathFields.buildHash().toBytesRef();
+        }
         context.doc().add(new SortedDocValuesField(fieldType().name(), timeSeriesId));
+
         TsidExtractingIdFieldMapper.createField(
             context,
             getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID)
                ?
routingPathFields.routingBuilder() : null, timeSeriesId ); @@ -170,231 +164,6 @@ public static Object encodeTsid(StreamInput in) { } } - public static class TimeSeriesIdBuilder implements DocumentDimensions { - - private static final int SEED = 0; - - public static final int MAX_DIMENSIONS = 512; - - private final Murmur3Hasher tsidHasher = new Murmur3Hasher(0); - - /** - * A map of the serialized values of dimension fields that will be used - * for generating the _tsid field. The map will be used by {@link TimeSeriesIdFieldMapper} - * to build the _tsid field for the document. - */ - private final SortedMap> dimensions = new TreeMap<>(); - /** - * Builds the routing. Used for building {@code _id}. If null then skipped. - */ - @Nullable - private final IndexRouting.ExtractFromSource.Builder routingBuilder; - - public TimeSeriesIdBuilder(@Nullable IndexRouting.ExtractFromSource.Builder routingBuilder) { - this.routingBuilder = routingBuilder; - } - - public BytesReference buildLegacyTsid() throws IOException { - if (dimensions.isEmpty()) { - throw new IllegalArgumentException("Dimension fields are missing."); - } - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.writeVInt(dimensions.size()); - for (Map.Entry> entry : dimensions.entrySet()) { - out.writeBytesRef(entry.getKey()); - List value = entry.getValue(); - if (value.size() > 1) { - // multi-value dimensions are only supported for newer indices that use buildTsidHash - throw new IllegalArgumentException( - "Dimension field [" + entry.getKey().utf8ToString() + "] cannot be a multi-valued field." - ); - } - assert value.isEmpty() == false : "dimension value is empty"; - value.get(0).writeTo(out); - } - return out.bytes(); - } - } - - private static final int MAX_HASH_LEN_BYTES = 2; - - static { - assert MAX_HASH_LEN_BYTES == StreamOutput.putVInt(new byte[2], tsidHashLen(MAX_DIMENSIONS), 0); - } - - /** - * Here we build the hash of the tsid using a similarity function so that we have a result - * with the following pattern: - * - * hash128(catenate(dimension field names)) + - * foreach(dimension field value, limit = MAX_DIMENSIONS) { hash32(dimension field value) } + - * hash128(catenate(dimension field values)) - * - * The idea is to be able to place 'similar' time series close to each other. Two time series - * are considered 'similar' if they share the same dimensions (names and values). 
- */ - public BytesReference buildTsidHash() { - // NOTE: hash all dimension field names - int numberOfDimensions = Math.min(MAX_DIMENSIONS, dimensions.size()); - int len = tsidHashLen(numberOfDimensions); - // either one or two bytes are occupied by the vint since we're bounded by #MAX_DIMENSIONS - byte[] tsidHash = new byte[MAX_HASH_LEN_BYTES + len]; - int tsidHashIndex = StreamOutput.putVInt(tsidHash, len, 0); - - tsidHasher.reset(); - for (final BytesRef name : dimensions.keySet()) { - tsidHasher.update(name.bytes); - } - tsidHashIndex = writeHash128(tsidHasher.digestHash(), tsidHash, tsidHashIndex); - - // NOTE: concatenate all dimension value hashes up to a certain number of dimensions - int tsidHashStartIndex = tsidHashIndex; - for (final List values : dimensions.values()) { - if ((tsidHashIndex - tsidHashStartIndex) >= 4 * numberOfDimensions) { - break; - } - assert values.isEmpty() == false : "dimension values are empty"; - final BytesRef dimensionValueBytesRef = values.get(0).toBytesRef(); - ByteUtils.writeIntLE( - StringHelper.murmurhash3_x86_32( - dimensionValueBytesRef.bytes, - dimensionValueBytesRef.offset, - dimensionValueBytesRef.length, - SEED - ), - tsidHash, - tsidHashIndex - ); - tsidHashIndex += 4; - } - - // NOTE: hash all dimension field allValues - tsidHasher.reset(); - for (final List values : dimensions.values()) { - for (BytesReference v : values) { - tsidHasher.update(v.toBytesRef().bytes); - } - } - tsidHashIndex = writeHash128(tsidHasher.digestHash(), tsidHash, tsidHashIndex); - - return new BytesArray(tsidHash, 0, tsidHashIndex); - } - - private static int tsidHashLen(int numberOfDimensions) { - return 16 + 16 + 4 * numberOfDimensions; - } - - private int writeHash128(final MurmurHash3.Hash128 hash128, byte[] buffer, int tsidHashIndex) { - ByteUtils.writeLongLE(hash128.h1, buffer, tsidHashIndex); - tsidHashIndex += 8; - ByteUtils.writeLongLE(hash128.h2, buffer, tsidHashIndex); - tsidHashIndex += 8; - return tsidHashIndex; - } - - @Override - public DocumentDimensions addString(String fieldName, BytesRef utf8Value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.write((byte) 's'); - /* - * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish - * so it's easier for folks to reason about the space taken up. Mostly - * it'll be smaller too. 
- */ - out.writeBytesRef(utf8Value); - add(fieldName, out.bytes()); - - if (routingBuilder != null) { - routingBuilder.addMatching(fieldName, utf8Value); - } - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - return this; - } - - @Override - public DocumentDimensions addIp(String fieldName, InetAddress value) { - return addString(fieldName, NetworkAddress.format(value)); - } - - @Override - public DocumentDimensions addLong(String fieldName, long value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.write((byte) 'l'); - out.writeLong(value); - add(fieldName, out.bytes()); - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - return this; - } - - @Override - public DocumentDimensions addUnsignedLong(String fieldName, long value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(value); - if (ul instanceof Long l) { - out.write((byte) 'l'); - out.writeLong(l); - } else { - out.write((byte) 'u'); - out.writeLong(value); - } - add(fieldName, out.bytes()); - return this; - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - } - - @Override - public DocumentDimensions addBoolean(String fieldName, boolean value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.write((byte) 'b'); - out.write(value ? 't' : 'f'); - add(fieldName, out.bytes()); - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - return this; - } - - @Override - public DocumentDimensions validate(final IndexSettings settings) { - if (settings.getIndexVersionCreated().before(IndexVersions.TIME_SERIES_ID_HASHING) - && dimensions.size() > settings.getValue(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING)) { - throw new MapperException( - "Too many dimension fields [" - + dimensions.size() - + "], max [" - + settings.getValue(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING) - + "] dimension fields allowed" - ); - } - return this; - } - - private void add(String fieldName, BytesReference encoded) throws IOException { - BytesRef name = new BytesRef(fieldName); - List values = dimensions.get(name); - if (values == null) { - // optimize for the common case where dimensions are not multi-valued - dimensions.put(name, List.of(encoded)); - } else { - if (values.size() == 1) { - // converts the immutable list that's optimized for the common case of having only one value to a mutable list - BytesReference previousValue = values.get(0); - values = new ArrayList<>(4); - values.add(previousValue); - dimensions.put(name, values); - } - values.add(encoded); - } - } - } - public static Object encodeTsid(final BytesRef bytesRef) { return base64Encode(bytesRef); } @@ -405,53 +174,27 @@ private static String base64Encode(final BytesRef bytesRef) { return Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(bytes); } - public static Map decodeTsidAsMap(BytesRef bytesRef) { - try (StreamInput input = new BytesArray(bytesRef).streamInput()) { - return decodeTsidAsMap(input); - } catch (IOException ex) { - throw new IllegalArgumentException("Dimension field cannot be deserialized.", ex); - } - } - - public static Map decodeTsidAsMap(StreamInput in) { - try { - int size = in.readVInt(); - Map result = new LinkedHashMap<>(size); - - for (int i = 0; i < size; i++) { - String name = null; - 
try { - name = in.readSlicedBytesReference().utf8ToString(); - } catch (AssertionError ae) { - throw new IllegalArgumentException("Error parsing keyword dimension: " + ae.getMessage(), ae); - } - - int type = in.read(); - switch (type) { - case (byte) 's' -> { - // parse a string - try { - result.put(name, in.readSlicedBytesReference().utf8ToString()); - } catch (AssertionError ae) { - throw new IllegalArgumentException("Error parsing keyword dimension: " + ae.getMessage(), ae); - } - } - case (byte) 'l' -> // parse a long - result.put(name, in.readLong()); - case (byte) 'u' -> { // parse an unsigned_long - Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(in.readLong()); - result.put(name, ul); - } - case (byte) 'd' -> // parse a double - result.put(name, in.readDouble()); - case (byte) 'b' -> // parse a boolean - result.put(name, in.read() == 't'); - default -> throw new IllegalArgumentException("Cannot parse [" + name + "]: Unknown type [" + type + "]"); + public static BytesReference buildLegacyTsid(RoutingPathFields routingPathFields) throws IOException { + SortedMap> routingValues = routingPathFields.routingValues(); + if (routingValues.isEmpty()) { + throw new IllegalArgumentException("Dimension fields are missing."); + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(routingValues.size()); + for (var entry : routingValues.entrySet()) { + out.writeBytesRef(entry.getKey()); + List value = entry.getValue(); + if (value.size() > 1) { + // multi-value dimensions are only supported for newer indices that use buildTsidHash + throw new IllegalArgumentException( + "Dimension field [" + entry.getKey().utf8ToString() + "] cannot be a multi-valued field." + ); } + assert value.isEmpty() == false : "dimension value is empty"; + value.get(0).writeTo(out); } - return result; - } catch (IOException | IllegalArgumentException e) { - throw new IllegalArgumentException("Error formatting " + NAME + ": " + e.getMessage(), e); + return out.bytes(); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java index 351e3149da3df..93ef04ddd159a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java @@ -184,10 +184,7 @@ private void addField(Context context, ContentPath path, String currentName, Str final String keyedFieldName = FlattenedFieldParser.extractKey(bytesKeyedValue).utf8ToString(); if (fieldType.isDimension() && fieldType.dimensions().contains(keyedFieldName)) { final BytesRef keyedFieldValue = FlattenedFieldParser.extractValue(bytesKeyedValue); - context.documentParserContext() - .getDimensions() - .addString(rootFieldFullPath + "." + keyedFieldName, keyedFieldValue) - .validate(context.documentParserContext().indexSettings()); + context.documentParserContext().getRoutingFields().addString(rootFieldFullPath + "." + keyedFieldName, keyedFieldValue); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValues.java new file mode 100644 index 0000000000000..a91960832239f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValues.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.index.mapper.vectors;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.script.field.vectors.MultiDenseVector;
+
+import java.util.Iterator;
+
+public class MultiDenseVectorScriptDocValues extends ScriptDocValues<BytesRef> {
+
+    public static final String MISSING_VECTOR_FIELD_MESSAGE = "A document doesn't have a value for a multi-vector field!";
+
+    private final int dims;
+    protected final MultiDenseVectorSupplier dvSupplier;
+
+    public MultiDenseVectorScriptDocValues(MultiDenseVectorSupplier supplier, int dims) {
+        super(supplier);
+        this.dvSupplier = supplier;
+        this.dims = dims;
+    }
+
+    public int dims() {
+        return dims;
+    }
+
+    private MultiDenseVector getCheckedVector() {
+        MultiDenseVector vector = dvSupplier.getInternal();
+        if (vector == null) {
+            throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE);
+        }
+        return vector;
+    }
+
+    /**
+     * Get the multi-dense vector's values, one float array per vector
+     */
+    public Iterator<float[]> getVectorValues() {
+        return getCheckedVector().getVectors();
+    }
+
+    /**
+     * Get each vector's magnitude
+     */
+    public float[] getMagnitudes() {
+        return getCheckedVector().getMagnitudes();
+    }
+
+    @Override
+    public BytesRef get(int index) {
+        throw new UnsupportedOperationException(
+            "accessing a multi-vector field's value through 'get' or 'value' is not supported, use 'vectorValues' or 'magnitudes' instead."
+ ); + } + + @Override + public int size() { + MultiDenseVector mdv = dvSupplier.getInternal(); + if (mdv != null) { + return mdv.size(); + } + return 0; + } + + public interface MultiDenseVectorSupplier extends Supplier { + @Override + default BytesRef getInternal(int index) { + throw new UnsupportedOperationException(); + } + + MultiDenseVector getInternal(); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorDVLeafFieldData.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorDVLeafFieldData.java index cc6fb38274451..b9716d315f33a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorDVLeafFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorDVLeafFieldData.java @@ -9,37 +9,44 @@ package org.elasticsearch.index.mapper.vectors; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReader; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.fielddata.LeafFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.script.field.DocValuesScriptFieldFactory; +import org.elasticsearch.script.field.vectors.BitMultiDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.ByteMultiDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.FloatMultiDenseVectorDocValuesField; + +import java.io.IOException; final class MultiVectorDVLeafFieldData implements LeafFieldData { private final LeafReader reader; private final String field; - private final IndexVersion indexVersion; private final DenseVectorFieldMapper.ElementType elementType; private final int dims; - MultiVectorDVLeafFieldData( - LeafReader reader, - String field, - IndexVersion indexVersion, - DenseVectorFieldMapper.ElementType elementType, - int dims - ) { + MultiVectorDVLeafFieldData(LeafReader reader, String field, DenseVectorFieldMapper.ElementType elementType, int dims) { this.reader = reader; this.field = field; - this.indexVersion = indexVersion; this.elementType = elementType; this.dims = dims; } @Override public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { - // TODO - return null; + try { + BinaryDocValues values = DocValues.getBinary(reader, field); + BinaryDocValues magnitudeValues = DocValues.getBinary(reader, field + MultiDenseVectorFieldMapper.VECTOR_MAGNITUDES_SUFFIX); + return switch (elementType) { + case BYTE -> new ByteMultiDenseVectorDocValuesField(values, magnitudeValues, name, elementType, dims); + case FLOAT -> new FloatMultiDenseVectorDocValuesField(values, magnitudeValues, name, elementType, dims); + case BIT -> new BitMultiDenseVectorDocValuesField(values, magnitudeValues, name, elementType, dims); + }; + } catch (IOException e) { + throw new IllegalStateException("Cannot load doc values for multi-vector field!", e); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorIndexFieldData.java index 65ef492ce052b..44a666e25a611 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorIndexFieldData.java @@ -55,7 +55,7 @@ public ValuesSourceType getValuesSourceType() { @Override public MultiVectorDVLeafFieldData load(LeafReaderContext 
context) { - return new MultiVectorDVLeafFieldData(context.reader(), fieldName, indexVersion, elementType, dims); + return new MultiVectorDVLeafFieldData(context.reader(), fieldName, elementType, dims); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoder.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoder.java index 9d09a7493d605..3db2d164846bd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoder.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoder.java @@ -84,4 +84,24 @@ public static void decodeDenseVector(IndexVersion indexVersion, BytesRef vectorB } } + public static float[] getMultiMagnitudes(BytesRef magnitudes) { + assert magnitudes.length % Float.BYTES == 0; + float[] multiMagnitudes = new float[magnitudes.length / Float.BYTES]; + ByteBuffer byteBuffer = ByteBuffer.wrap(magnitudes.bytes, magnitudes.offset, magnitudes.length).order(ByteOrder.LITTLE_ENDIAN); + for (int i = 0; i < magnitudes.length / Float.BYTES; i++) { + multiMagnitudes[i] = byteBuffer.getFloat(); + } + return multiMagnitudes; + } + + public static void decodeMultiDenseVector(BytesRef vectorBR, int numVectors, float[][] multiVectorValue) { + if (vectorBR == null) { + throw new IllegalArgumentException(MultiDenseVectorScriptDocValues.MISSING_VECTOR_FIELD_MESSAGE); + } + FloatBuffer fb = ByteBuffer.wrap(vectorBR.bytes, vectorBR.offset, vectorBR.length).order(ByteOrder.LITTLE_ENDIAN).asFloatBuffer(); + for (int i = 0; i < numVectors; i++) { + fb.get(multiVectorValue[i]); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 626875c75a5fe..83bca7d27aeeb 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -112,6 +112,13 @@ public QueryBuilder query() { return query; } + /** + * Returns path to the searched nested object. + */ + public String path() { + return path; + } + /** * Returns inner hit definition in the scope of this query and reusing the defined type and query. */ diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java similarity index 91% rename from server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java rename to server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java index 1539be9a46ab9..33077697a2ce6 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java @@ -7,7 +7,7 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -package org.elasticsearch.search.retriever.rankdoc; +package org.elasticsearch.index.query; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Query; @@ -16,15 +16,13 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.query.AbstractQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQuery; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Arrays; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.TransportVersions.RRF_QUERY_REWRITE; @@ -55,6 +53,15 @@ public RankDocsQueryBuilder(StreamInput in) throws IOException { } } + @Override + protected void extractInnerHitBuilders(Map innerHits) { + if (queryBuilders != null) { + for (QueryBuilder query : queryBuilders) { + InnerHitContextBuilder.extractInnerHits(query, innerHits); + } + } + } + @Override protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { if (queryBuilders != null) { @@ -71,7 +78,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws return super.doRewrite(queryRewriteContext); } - RankDoc[] rankDocs() { + public RankDoc[] rankDocs() { return rankDocs; } diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchPhaseAPMMetrics.java b/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchPhaseAPMMetrics.java new file mode 100644 index 0000000000000..6b523a154379e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchPhaseAPMMetrics.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */
+
+package org.elasticsearch.index.search.stats;
+
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.index.shard.SearchOperationListener;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.telemetry.metric.LongHistogram;
+import org.elasticsearch.telemetry.metric.MeterRegistry;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+public final class ShardSearchPhaseAPMMetrics implements SearchOperationListener {
+
+    public static final String QUERY_SEARCH_PHASE_METRIC = "es.search.shards.phases.query.duration.histogram";
+    public static final String FETCH_SEARCH_PHASE_METRIC = "es.search.shards.phases.fetch.duration.histogram";
+
+    public static final String SYSTEM_THREAD_ATTRIBUTE_NAME = "system_thread";
+
+    private final LongHistogram queryPhaseMetric;
+    private final LongHistogram fetchPhaseMetric;
+
+    // Reuse a per-thread attributes map, to avoid allocating on the search path and to avoid clashes between threads
+    private static final ThreadLocal<Map<String, Object>> THREAD_LOCAL_ATTRS = ThreadLocal.withInitial(() -> new HashMap<>(1));
+
+    public ShardSearchPhaseAPMMetrics(MeterRegistry meterRegistry) {
+        this.queryPhaseMetric = meterRegistry.registerLongHistogram(
+            QUERY_SEARCH_PHASE_METRIC,
+            "Query search phase execution times at the shard level, expressed as a histogram",
+            "ms"
+        );
+        this.fetchPhaseMetric = meterRegistry.registerLongHistogram(
+            FETCH_SEARCH_PHASE_METRIC,
+            "Fetch search phase execution times at the shard level, expressed as a histogram",
+            "ms"
+        );
+    }
+
+    @Override
+    public void onQueryPhase(SearchContext searchContext, long tookInNanos) {
+        recordPhaseLatency(queryPhaseMetric, tookInNanos);
+    }
+
+    @Override
+    public void onFetchPhase(SearchContext searchContext, long tookInNanos) {
+        recordPhaseLatency(fetchPhaseMetric, tookInNanos);
+    }
+
+    private static void recordPhaseLatency(LongHistogram histogramMetric, long tookInNanos) {
+        Map<String, Object> attrs = ShardSearchPhaseAPMMetrics.THREAD_LOCAL_ATTRS.get();
+        boolean isSystem = ((EsExecutors.EsThread) Thread.currentThread()).isSystem();
+        attrs.put(SYSTEM_THREAD_ATTRIBUTE_NAME, isSystem);
+        histogramMetric.record(TimeUnit.NANOSECONDS.toMillis(tookInNanos), attrs);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
index 70ba9950f7689..d8bd460f6f819 100644
--- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
+++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
@@ -245,7 +245,11 @@ public ShardSnapshotResult getShardSnapshotResult() {
     }
 
     public void ensureNotAborted() {
-        switch (stage.get()) {
+        ensureNotAborted(stage.get());
+    }
+
+    public static void ensureNotAborted(Stage shardSnapshotStage) {
+        switch (shardSnapshotStage) {
             case ABORTED -> throw new AbortedSnapshotException();
             case PAUSING -> throw new PausedSnapshotException();
         }
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java
index 6ac7313a1c51b..2700cba0abc3c 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java
@@ -24,7 +24,7 @@ public final class TranslogDeletionPolicy {
 
     private final Map openTranslogRef;
 
     public void
assertNoOpenTranslogRefs() { - if (openTranslogRef.isEmpty() == false) { + if (Assertions.ENABLED && openTranslogRef.isEmpty() == false) { AssertionError e = new AssertionError("not all translog generations have been released"); openTranslogRef.values().forEach(e::addSuppressed); throw e; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 706f788e8a310..3ac61bbca1a21 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -124,6 +124,7 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingStats; +import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; @@ -263,6 +264,7 @@ public class IndicesService extends AbstractLifecycleComponent private final CheckedBiConsumer requestCacheKeyDifferentiator; private final MapperMetrics mapperMetrics; private final PostRecoveryMerger postRecoveryMerger; + private final List searchOperationListeners; @Override protected void doStart() { @@ -379,8 +381,8 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.timestampFieldMapperService = new TimestampFieldMapperService(settings, threadPool, this); - this.postRecoveryMerger = new PostRecoveryMerger(settings, threadPool.executor(ThreadPool.Names.FORCE_MERGE), this::getShardOrNull); + this.searchOperationListeners = builder.searchOperationListener; } private static final String DANGLING_INDICES_UPDATE_THREAD_NAME = "DanglingIndices#updateTask"; @@ -752,7 +754,8 @@ private synchronized IndexService createIndexService( indexNameExpressionResolver, recoveryStateFactories, loadSlowLogFieldProvider(), - mapperMetrics + mapperMetrics, + searchOperationListeners ); for (IndexingOperationListener operationListener : indexingOperationListeners) { indexModule.addIndexOperationListener(operationListener); @@ -830,7 +833,8 @@ public synchronized MapperService createIndexMapperServiceForValidation(IndexMet indexNameExpressionResolver, recoveryStateFactories, loadSlowLogFieldProvider(), - mapperMetrics + mapperMetrics, + searchOperationListeners ); pluginsService.forEach(p -> p.onIndexModule(indexModule)); return indexModule.newIndexMapperService(clusterService, parserConfig, mapperRegistry, scriptService); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java index 8fff1f5bef51f..08d1b5ce3a96c 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; +import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.IndexStorePlugin; @@ -74,6 +75,7 @@ 
public class IndicesServiceBuilder { @Nullable CheckedBiConsumer requestCacheKeyDifferentiator; MapperMetrics mapperMetrics; + List searchOperationListener = List.of(); public IndicesServiceBuilder settings(Settings settings) { this.settings = settings; @@ -177,6 +179,15 @@ public IndicesServiceBuilder mapperMetrics(MapperMetrics mapperMetrics) { return this; } + public List searchOperationListeners() { + return searchOperationListener; + } + + public IndicesServiceBuilder searchOperationListeners(List searchOperationListener) { + this.searchOperationListener = searchOperationListener; + return this; + } + public IndicesService build() { Objects.requireNonNull(settings); Objects.requireNonNull(pluginsService); @@ -201,6 +212,7 @@ public IndicesService build() { Objects.requireNonNull(indexFoldersDeletionListeners); Objects.requireNonNull(snapshotCommitSuppliers); Objects.requireNonNull(mapperMetrics); + Objects.requireNonNull(searchOperationListener); // collect engine factory providers from plugins engineFactoryProviders = pluginsService.filterPlugins(EnginePlugin.class) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 308f1894b78db..c8d31d2060caf 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -397,6 +397,36 @@ record StartRecoveryRequestToSend(StartRecoveryRequest startRecoveryRequest, Str } indexShard.recoverLocallyUpToGlobalCheckpoint(ActionListener.assertOnce(l)); }) + // peer recovery can consume a lot of disk space, so it's worth cleaning up locally ahead of the attempt + // operation runs only if the previous operation succeeded, and returns the previous operation's result. + // Failures at this stage aren't fatal, we can attempt to recover and then clean up again at the end. #104473 + .andThenApply(startingSeqNo -> { + Store.MetadataSnapshot snapshot; + try { + snapshot = indexShard.snapshotStoreMetadata(); + } catch (IOException e) { + // We give up on the contents for any checked exception thrown by snapshotStoreMetadata. We don't want to + // allow those to bubble up and interrupt recovery because the subsequent recovery attempt is expected + // to fix up these problems for us if it completes successfully. 
+ if (e instanceof org.apache.lucene.index.IndexNotFoundException) { + // this is the expected case on first recovery, so don't spam the logs with exceptions + logger.debug(() -> format("no snapshot found for shard %s, treating as empty", indexShard.shardId())); + } else { + logger.warn(() -> format("unable to load snapshot for shard %s, treating as empty", indexShard.shardId()), e); + } + snapshot = Store.MetadataSnapshot.EMPTY; + } + + Store store = indexShard.store(); + store.incRef(); + try { + logger.debug(() -> format("cleaning up index directory for %s before recovery", indexShard.shardId())); + store.cleanupAndVerify("cleanup before peer recovery", snapshot); + } finally { + store.decRef(); + } + return startingSeqNo; + }) // now construct the start-recovery request .andThenApply(startingSeqNo -> { assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index cd92f38e65152..c6e09f61befa0 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -74,6 +74,14 @@ default void init(Client client) {} InferenceServiceConfiguration getConfiguration(); + /** + * Whether this service should be hidden from the API. Should be used for services + * that are not ready to be used. + */ + default Boolean hideFromConfigurationApi() { + return Boolean.FALSE; + } + /** * The task types supported by the service * @return Set of supported. @@ -131,9 +139,10 @@ void chunkedInfer( /** * Start or prepare the model for use. * @param model The model + * @param timeout Start timeout * @param listener The listener */ - void start(Model model, ActionListener listener); + void start(Model model, TimeValue timeout, ActionListener listener); /** * Stop the model deployment. @@ -145,17 +154,6 @@ default void stop(UnparsedModel unparsedModel, ActionListener listener) listener.onResponse(true); } - /** - * Put the model definition (if applicable) - * The main purpose of this function is to download ELSER - * The default action does nothing except acknowledge the request (true). - * @param modelVariant The configuration of the model variant to be downloaded - * @param listener The listener - */ - default void putModel(Model modelVariant, ActionListener listener) { - listener.onResponse(true); - } - /** * Optionally test the new model configuration in the inference service. * This function should be called when the model is first created, the diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java index 316f621e80669..8654142016572 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java @@ -169,4 +169,39 @@ public boolean equals(Object o) { public int hashCode() { return pipelines.hashCode(); } + + /** + * Returns a copy of this object with processor upgrades applied, if necessary. Otherwise, returns this object. + * + *
+     * <p>The given upgrader is applied to the config map for any processor of the given type.
+     */
+    public IngestMetadata maybeUpgradeProcessors(String processorType, ProcessorConfigUpgrader processorConfigUpgrader) {
+        Map<String, PipelineConfiguration> newPipelines = null; // as an optimization, we will lazily copy the map only if needed
+        for (Map.Entry<String, PipelineConfiguration> entry : pipelines.entrySet()) {
+            String pipelineId = entry.getKey();
+            PipelineConfiguration originalPipeline = entry.getValue();
+            PipelineConfiguration upgradedPipeline = originalPipeline.maybeUpgradeProcessors(processorType, processorConfigUpgrader);
+            if (upgradedPipeline.equals(originalPipeline) == false) {
+                if (newPipelines == null) {
+                    newPipelines = new HashMap<>(pipelines);
+                }
+                newPipelines.put(pipelineId, upgradedPipeline);
+            }
+        }
+        return newPipelines != null ? new IngestMetadata(newPipelines) : this;
+    }
+
+    /**
+     * Functional interface for upgrading processor configs. An implementation of this will be associated with a specific processor type.
+     */
+    public interface ProcessorConfigUpgrader {
+
+        /**
+         * Upgrades the config for an individual processor of the appropriate type, if necessary.
+         *
+         * @param processorConfig The config to upgrade, which will be mutated if required
+         * @return Whether an upgrade was required
+         */
+        boolean maybeUpgrade(Map<String, Object> processorConfig);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
index 7406ee8837264..9067cdb2040fd 100644
--- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
+++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.xcontent.XContentType;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.util.Map;
 import java.util.Objects;
 
@@ -156,4 +157,36 @@ public int hashCode() {
         result = 31 * result + getConfigAsMap().hashCode();
         return result;
     }
+
+    /**
+     * Returns a copy of this object with processor upgrades applied, if necessary. Otherwise, returns this object.
+     *
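+     * <p>A sketch of a hypothetical upgrader (the processor type and config keys are invented
+     * for illustration) that renames a config key in place:
+     * <pre>{@code
+     * IngestMetadata.ProcessorConfigUpgrader renameKey = config -> {
+     *     if (config.containsKey("old_setting")) {
+     *         config.put("new_setting", config.remove("old_setting"));
+     *         return true; // an upgrade was applied
+     *     }
+     *     return false; // nothing to do
+     * };
+     * IngestMetadata upgraded = ingestMetadata.maybeUpgradeProcessors("my_processor", renameKey);
+     * }</pre>
+     *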
+     * <p>The given upgrader is applied to the config map for any processor of the given type.
+     */
+    PipelineConfiguration maybeUpgradeProcessors(String type, IngestMetadata.ProcessorConfigUpgrader upgrader) {
+        Map<String, Object> mutableConfigMap = getConfigAsMap();
+        boolean changed = false;
+        // This should be a List of Maps, where the keys are processor types and the values are config maps.
+        // But we'll skip upgrading rather than fail if not.
+        if (mutableConfigMap.get(Pipeline.PROCESSORS_KEY) instanceof Iterable<?> processors) {
+            for (Object processor : processors) {
+                if (processor instanceof Map<?, ?> processorMap && processorMap.get(type) instanceof Map<?, ?> targetProcessor) {
+                    @SuppressWarnings("unchecked") // All XContent maps will be Map<String, Object>
+                    Map<String, Object> processorConfigMap = (Map<String, Object>) targetProcessor;
+                    if (upgrader.maybeUpgrade(processorConfigMap)) {
+                        changed = true;
+                    }
+                }
+            }
+        }
+        if (changed) {
+            try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
+                return new PipelineConfiguration(id, BytesReference.bytes(builder.map(mutableConfigMap)), xContentType);
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+        } else {
+            return this;
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
index b424b417da82b..e8b9d18a1dd08 100644
--- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
+++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
@@ -29,7 +29,6 @@
 import org.elasticsearch.action.ingest.ReservedPipelineAction;
 import org.elasticsearch.action.search.SearchExecutionStatsCollector;
 import org.elasticsearch.action.search.SearchPhaseController;
-import org.elasticsearch.action.search.SearchTransportAPMMetrics;
 import org.elasticsearch.action.search.SearchTransportService;
 import org.elasticsearch.action.support.TransportAction;
 import org.elasticsearch.action.update.UpdateHelper;
@@ -47,6 +46,7 @@
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings;
 import org.elasticsearch.cluster.metadata.IndexMetadataVerifier;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService;
 import org.elasticsearch.cluster.metadata.MetadataCreateIndexService;
 import org.elasticsearch.cluster.metadata.MetadataDataStreamsService;
@@ -116,6 +116,8 @@
 import org.elasticsearch.index.analysis.AnalysisRegistry;
 import org.elasticsearch.index.mapper.MapperMetrics;
 import org.elasticsearch.index.mapper.SourceFieldMetrics;
+import org.elasticsearch.index.search.stats.ShardSearchPhaseAPMMetrics;
+import org.elasticsearch.index.shard.SearchOperationListener;
 import org.elasticsearch.indices.ExecutorSelector;
 import org.elasticsearch.indices.IndicesModule;
 import org.elasticsearch.indices.IndicesService;
@@ -232,6 +234,7 @@
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
+import java.util.function.UnaryOperator;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
@@ -796,6 +799,9 @@ private void construct(
             threadPool::relativeTimeInMillis
         );
         MapperMetrics mapperMetrics = new MapperMetrics(sourceFieldMetrics);
+        final List<SearchOperationListener> searchOperationListeners = List.of(
+            new ShardSearchPhaseAPMMetrics(telemetryProvider.getMeterRegistry())
+        );
 
         IndicesService indicesService = new IndicesServiceBuilder().settings(settings)
             .pluginsService(pluginsService)
@@ -817,6
+823,7 @@ private void construct( .valuesSourceRegistry(searchModule.getValuesSourceRegistry()) .requestCacheKeyDifferentiator(searchModule.getRequestCacheKeyDifferentiator()) .mapperMetrics(mapperMetrics) + .searchOperationListeners(searchOperationListeners) .build(); final var parameters = new IndexSettingProvider.Parameters(indicesService::createIndexMapperServiceForValidation); @@ -970,7 +977,9 @@ private void construct( ); var indexTemplateMetadataUpgraders = pluginsService.map(Plugin::getIndexTemplateMetadataUpgrader).toList(); - modules.bindToInstance(MetadataUpgrader.class, new MetadataUpgrader(indexTemplateMetadataUpgraders)); + List>> customMetadataUpgraders = pluginsService.map(Plugin::getCustomMetadataUpgraders) + .toList(); + modules.bindToInstance(MetadataUpgrader.class, new MetadataUpgrader(indexTemplateMetadataUpgraders, customMetadataUpgraders)); final IndexMetadataVerifier indexMetadataVerifier = new IndexMetadataVerifier( settings, @@ -998,7 +1007,6 @@ private void construct( telemetryProvider.getTracer() ); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); - final SearchTransportAPMMetrics searchTransportAPMMetrics = new SearchTransportAPMMetrics(telemetryProvider.getMeterRegistry()); final SearchResponseMetrics searchResponseMetrics = new SearchResponseMetrics(telemetryProvider.getMeterRegistry()); final SearchTransportService searchTransportService = new SearchTransportService( transportService, @@ -1178,7 +1186,6 @@ private void construct( b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); b.bind(MetadataUpdateSettingsService.class).toInstance(metadataUpdateSettingsService); b.bind(SearchService.class).toInstance(searchService); - b.bind(SearchTransportAPMMetrics.class).toInstance(searchTransportAPMMetrics); b.bind(SearchResponseMetrics.class).toInstance(searchResponseMetrics); b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(searchService::aggReduceContextBuilder)); @@ -1463,6 +1470,7 @@ private CircuitBreakerService createCircuitBreakerService( /** * Wrap a group of reloadable plugins into a single reloadable plugin interface + * * @param reloadablePlugins A list of reloadable plugins * @return A single ReloadablePlugin that, upon reload, reloads the plugins it wraps */ diff --git a/server/src/main/java/org/elasticsearch/plugins/MetadataUpgrader.java b/server/src/main/java/org/elasticsearch/plugins/MetadataUpgrader.java index 6ad66f75304d7..3db2d136ce347 100644 --- a/server/src/main/java/org/elasticsearch/plugins/MetadataUpgrader.java +++ b/server/src/main/java/org/elasticsearch/plugins/MetadataUpgrader.java @@ -14,16 +14,26 @@ import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.UnaryOperator; +import static java.util.stream.Collectors.collectingAndThen; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.mapping; +import static java.util.stream.Collectors.toList; + /** * Upgrades {@link Metadata} on startup on behalf of installed {@link Plugin}s */ public class MetadataUpgrader { public final UnaryOperator> indexTemplateMetadataUpgraders; + public final Map> customMetadataUpgraders; - public MetadataUpgrader(Collection>> indexTemplateMetadataUpgraders) { + public MetadataUpgrader( + Collection>> indexTemplateMetadataUpgraders, + Collection>> customMetadataUpgraders + 
) { this.indexTemplateMetadataUpgraders = templates -> { Map upgradedTemplates = new HashMap<>(templates); for (UnaryOperator> upgrader : indexTemplateMetadataUpgraders) { @@ -31,5 +41,29 @@ public MetadataUpgrader(Collection map.entrySet().stream()) + .collect( + groupingBy( + // Group by the type of custom metadata to be upgraded (the entry key) + Map.Entry::getKey, + // For each type, extract the operators (the entry values), collect to a list, and make an operator which combines them + collectingAndThen(mapping(Map.Entry::getValue, toList()), CombiningCustomUpgrader::new) + ) + ); + } + + private record CombiningCustomUpgrader(List> upgraders) implements UnaryOperator { + + @Override + public Metadata.Custom apply(Metadata.Custom custom) { + Metadata.Custom upgraded = custom; + for (UnaryOperator upgrader : upgraders) { + upgraded = upgrader.apply(upgraded); + } + return upgraded; + } } + } diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index 725cd271e10f8..1ccb5331a45d7 100644 --- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; @@ -248,6 +249,22 @@ public UnaryOperator> getIndexTemplateMetadat return UnaryOperator.identity(); } + /** + * Returns operators to modify custom metadata in the cluster state on startup. + * + *
<p>
Each key of the map returned gives the type of custom to be modified. Each value is an operator to be applied to that custom + * metadata. The operator will be invoked with the result of calling {@link Metadata#custom(String)} with the map key as its argument, + * and should downcast the value accordingly. + * + *
<p>
Plugins should return an empty map if no upgrade is required. + * + *
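+     * <p>For example, a plugin might return (a hypothetical sketch; the upgrade logic is
+     * invented for illustration):
+     * <pre>{@code
+     * @Override
+     * public Map<String, UnaryOperator<Metadata.Custom>> getCustomMetadataUpgraders() {
+     *     return Map.of(IngestMetadata.TYPE, custom -> {
+     *         IngestMetadata ingestMetadata = (IngestMetadata) custom;
+     *         return ingestMetadata.maybeUpgradeProcessors("my_processor", config -> config.remove("legacy_flag") != null);
+     *     });
+     * }
+     * }</pre>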
+     * <p>The order of the upgrade calls is undefined and can change between runs. It is expected that plugins will modify only
+     * custom metadata owned by them, to avoid conflicts.
+     */
+    public Map<String, UnaryOperator<Metadata.Custom>> getCustomMetadataUpgraders() {
+        return Map.of();
+    }
+
     /**
      * Provides the list of this plugin's custom thread pools, empty if
      * none.
diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java
index 2cd6e2b11ef7a..3a210199065b7 100644
--- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java
+++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java
@@ -127,16 +127,7 @@ public static Map<String, Object> createAttributesMap(
         OperationPurpose purpose,
         String operation
     ) {
-        return Map.of(
-            "repo_type",
-            repositoryMetadata.type(),
-            "repo_name",
-            repositoryMetadata.name(),
-            "operation",
-            operation,
-            "purpose",
-            purpose.getKey()
-        );
+        return Map.of("repo_type", repositoryMetadata.type(), "operation", operation, "purpose", purpose.getKey());
     }
 
 }
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index b43fe05a541f6..8c847da344fe5 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -191,6 +191,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository {
 
     private static final Logger logger = LogManager.getLogger(BlobStoreRepository.class);
 
+    private class ShutdownLogger {
+        // Creating a separate logger so that the log-level can be manipulated separately from the parent class.
+        private static final Logger shutdownLogger = LogManager.getLogger(ShutdownLogger.class);
+    }
+
     protected volatile RepositoryMetadata metadata;
 
     protected final ThreadPool threadPool;
@@ -3467,10 +3472,37 @@ private void doSnapshotShard(SnapshotShardContext context) {
     }
 
     private static void ensureNotAborted(ShardId shardId, SnapshotId snapshotId, IndexShardSnapshotStatus snapshotStatus, String fileName) {
+        var shardSnapshotStage = snapshotStatus.getStage();
         try {
-            snapshotStatus.ensureNotAborted();
+            IndexShardSnapshotStatus.ensureNotAborted(shardSnapshotStage);
+
+            if (shardSnapshotStage != IndexShardSnapshotStatus.Stage.INIT && shardSnapshotStage != IndexShardSnapshotStatus.Stage.STARTED) {
+                // A normally running shard snapshot should be in stage INIT or STARTED. And we know it's not in PAUSING or ABORTED because
+                // the ensureNotAborted() call above did not throw. The remaining options don't make sense, if they ever happen.
+                logger.error(
+                    () -> Strings.format(
+                        "Shard snapshot found an unexpected state. ShardId [%s], SnapshotID [%s], Stage [%s]",
+                        shardId,
+                        snapshotId,
+                        shardSnapshotStage
+                    )
+                );
+                assert false;
+            }
         } catch (Exception e) {
-            logger.debug("[{}] [{}] {} on the file [{}], exiting", shardId, snapshotId, e.getMessage(), fileName);
+            // We want to see when a shard snapshot operation checks for and finds an interrupt signal during shutdown. A
+            // PausedSnapshotException indicates we're in shutdown because that's the only case when shard snapshots are signaled to pause.
+            // An AbortedSnapshotException may also occur during shutdown if an uncommon error occurs.
+ ShutdownLogger.shutdownLogger.debug( + () -> Strings.format( + "Shard snapshot operation is aborting. ShardId [%s], SnapshotID [%s], File [%s], Stage [%s]", + shardId, + snapshotId, + fileName, + shardSnapshotStage + ), + e + ); assert e instanceof AbortedSnapshotException || e instanceof PausedSnapshotException : e; throw e; } diff --git a/server/src/main/java/org/elasticsearch/rest/RestResponse.java b/server/src/main/java/org/elasticsearch/rest/RestResponse.java index fd8b90a99e7f6..29cae343fb09e 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestResponse.java +++ b/server/src/main/java/org/elasticsearch/rest/RestResponse.java @@ -16,6 +16,8 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -43,6 +45,7 @@ public final class RestResponse implements Releasable { static final String STATUS = "status"; private static final Logger SUPPRESSED_ERROR_LOGGER = LogManager.getLogger("rest.suppressed"); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(AbstractRestChannel.class); private final RestStatus status; @@ -142,6 +145,16 @@ public RestResponse(RestChannel channel, RestStatus status, Exception e) throws if (params.paramAsBoolean("error_trace", false) && status != RestStatus.UNAUTHORIZED) { params = new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params); } + + if (channel.detailedErrorsEnabled() == false) { + deprecationLogger.warn( + DeprecationCategory.API, + "http_detailed_errors", + "The JSON format of non-detailed errors will change in Elasticsearch 9.0 to match the JSON structure" + + " used for detailed errors. To keep using the existing format, use the V8 REST API." + ); + } + try (XContentBuilder builder = channel.newErrorBuilder()) { build(builder, params, status, channel.detailedErrorsEnabled(), e); this.content = BytesReference.bytes(builder); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java b/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java index 338dabb23ab4f..7b57481ad5716 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java @@ -36,6 +36,10 @@ private SearchCapabilities() {} private static final String KQL_QUERY_SUPPORTED = "kql_query"; /** Support multi-dense-vector field mapper. */ private static final String MULTI_DENSE_VECTOR_FIELD_MAPPER = "multi_dense_vector_field_mapper"; + /** Support propagating nested retrievers' inner_hits to top-level compound retrievers . */ + private static final String NESTED_RETRIEVER_INNER_HITS_SUPPORT = "nested_retriever_inner_hits_support"; + /** Support multi-dense-vector script field access. 
*/ + private static final String MULTI_DENSE_VECTOR_SCRIPT_ACCESS = "multi_dense_vector_script_access"; public static final Set CAPABILITIES; static { @@ -45,8 +49,10 @@ private SearchCapabilities() {} capabilities.add(BYTE_FLOAT_BIT_DOT_PRODUCT_CAPABILITY); capabilities.add(DENSE_VECTOR_DOCVALUE_FIELDS); capabilities.add(TRANSFORM_RANK_RRF_TO_RETRIEVER); + capabilities.add(NESTED_RETRIEVER_INNER_HITS_SUPPORT); if (MultiDenseVectorFieldMapper.FEATURE_FLAG.isEnabled()) { capabilities.add(MULTI_DENSE_VECTOR_FIELD_MAPPER); + capabilities.add(MULTI_DENSE_VECTOR_SCRIPT_ACCESS); } if (Build.current().isSnapshot()) { capabilities.add(KQL_QUERY_SUPPORTED); diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVector.java new file mode 100644 index 0000000000000..24e19a803ff38 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVector.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.util.BytesRef; + +import java.util.Iterator; + +public class BitMultiDenseVector extends ByteMultiDenseVector { + public BitMultiDenseVector(Iterator vectorValues, BytesRef magnitudesBytes, int numVecs, int dims) { + super(vectorValues, magnitudesBytes, numVecs, dims); + } + + @Override + public void checkDimensions(int qvDims) { + if (qvDims != dims) { + throw new IllegalArgumentException( + "The query vector has a different number of dimensions [" + + qvDims * Byte.SIZE + + "] than the document vectors [" + + dims * Byte.SIZE + + "]." + ); + } + } + + @Override + public int getDims() { + return dims * Byte.SIZE; + } +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVectorDocValuesField.java new file mode 100644 index 0000000000000..35a43eabb8f0c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVectorDocValuesField.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.index.BinaryDocValues; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType; + +public class BitMultiDenseVectorDocValuesField extends ByteMultiDenseVectorDocValuesField { + + public BitMultiDenseVectorDocValuesField( + BinaryDocValues input, + BinaryDocValues magnitudes, + String name, + ElementType elementType, + int dims + ) { + super(input, magnitudes, name, elementType, dims / 8); + } + + @Override + protected MultiDenseVector getVector() { + return new BitMultiDenseVector(vectorValue, magnitudesValue, numVecs, dims); + } +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVector.java new file mode 100644 index 0000000000000..e610d10146b2f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVector.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.mapper.vectors.VectorEncoderDecoder; + +import java.util.Iterator; + +public class ByteMultiDenseVector implements MultiDenseVector { + + protected final Iterator vectorValues; + protected final int numVecs; + protected final int dims; + + private Iterator floatDocVectors; + private float[] magnitudes; + private final BytesRef magnitudesBytes; + + public ByteMultiDenseVector(Iterator vectorValues, BytesRef magnitudesBytes, int numVecs, int dims) { + assert magnitudesBytes.length == numVecs * Float.BYTES; + this.vectorValues = vectorValues; + this.numVecs = numVecs; + this.dims = dims; + this.magnitudesBytes = magnitudesBytes; + } + + @Override + public Iterator getVectors() { + if (floatDocVectors == null) { + floatDocVectors = new ByteToFloatIteratorWrapper(vectorValues, dims); + } + return floatDocVectors; + } + + @Override + public float[] getMagnitudes() { + if (magnitudes == null) { + magnitudes = VectorEncoderDecoder.getMultiMagnitudes(magnitudesBytes); + } + return magnitudes; + } + + @Override + public boolean isEmpty() { + return false; + } + + @Override + public int getDims() { + return dims; + } + + @Override + public int size() { + return numVecs; + } + + static class ByteToFloatIteratorWrapper implements Iterator { + private final Iterator byteIterator; + private final float[] buffer; + private final int dims; + + ByteToFloatIteratorWrapper(Iterator byteIterator, int dims) { + this.byteIterator = byteIterator; + this.buffer = new float[dims]; + this.dims = dims; + } + + @Override + public boolean hasNext() { + return byteIterator.hasNext(); + } + + @Override + public float[] next() { + byte[] next = byteIterator.next(); + for (int i = 0; i < dims; i++) { + buffer[i] = next[i]; + } + return buffer; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVectorDocValuesField.java 
b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVectorDocValuesField.java new file mode 100644 index 0000000000000..d1e062e0a3dee --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVectorDocValuesField.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType; +import org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValues; + +import java.io.IOException; +import java.util.Iterator; + +public class ByteMultiDenseVectorDocValuesField extends MultiDenseVectorDocValuesField { + + protected final BinaryDocValues input; + private final BinaryDocValues magnitudes; + protected final int dims; + protected int numVecs; + protected Iterator vectorValue; + protected boolean decoded; + protected BytesRef value; + protected BytesRef magnitudesValue; + private byte[] buffer; + + public ByteMultiDenseVectorDocValuesField( + BinaryDocValues input, + BinaryDocValues magnitudes, + String name, + ElementType elementType, + int dims + ) { + super(name, elementType); + this.input = input; + this.dims = dims; + this.buffer = new byte[dims]; + this.magnitudes = magnitudes; + } + + @Override + public void setNextDocId(int docId) throws IOException { + decoded = false; + if (input.advanceExact(docId)) { + boolean magnitudesFound = magnitudes.advanceExact(docId); + assert magnitudesFound; + value = input.binaryValue(); + assert value.length % dims == 0; + numVecs = value.length / dims; + magnitudesValue = magnitudes.binaryValue(); + assert magnitudesValue.length == (numVecs * Float.BYTES); + } else { + value = null; + magnitudesValue = null; + vectorValue = null; + numVecs = 0; + } + } + + @Override + public MultiDenseVectorScriptDocValues toScriptDocValues() { + return new MultiDenseVectorScriptDocValues(this, dims); + } + + protected MultiDenseVector getVector() { + return new ByteMultiDenseVector(vectorValue, magnitudesValue, numVecs, dims); + } + + @Override + public MultiDenseVector get() { + if (isEmpty()) { + return MultiDenseVector.EMPTY; + } + decodeVectorIfNecessary(); + return getVector(); + } + + @Override + public MultiDenseVector get(MultiDenseVector defaultValue) { + if (isEmpty()) { + return defaultValue; + } + decodeVectorIfNecessary(); + return getVector(); + } + + @Override + public MultiDenseVector getInternal() { + return get(null); + } + + private void decodeVectorIfNecessary() { + if (decoded == false && value != null) { + vectorValue = new ByteVectorIterator(value, buffer, numVecs); + decoded = true; + } + } + + @Override + public int size() { + return value == null ? 
0 : value.length / dims; + } + + @Override + public boolean isEmpty() { + return value == null; + } + + static class ByteVectorIterator implements Iterator { + private final byte[] buffer; + private final BytesRef vectorValues; + private final int size; + private int idx = 0; + + ByteVectorIterator(BytesRef vectorValues, byte[] buffer, int size) { + assert vectorValues.length == (buffer.length * size); + this.vectorValues = vectorValues; + this.size = size; + this.buffer = buffer; + } + + @Override + public boolean hasNext() { + return idx < size; + } + + @Override + public byte[] next() { + if (hasNext() == false) { + throw new IllegalArgumentException("No more elements in the iterator"); + } + System.arraycopy(vectorValues.bytes, vectorValues.offset + idx * buffer.length, buffer, 0, buffer.length); + idx++; + return buffer; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVector.java new file mode 100644 index 0000000000000..9ffe8b3b970c4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVector.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.util.BytesRef; + +import java.util.Iterator; + +import static org.elasticsearch.index.mapper.vectors.VectorEncoderDecoder.getMultiMagnitudes; + +public class FloatMultiDenseVector implements MultiDenseVector { + + private final BytesRef magnitudes; + private float[] magnitudesArray = null; + private final int dims; + private final int numVectors; + private final Iterator decodedDocVector; + + public FloatMultiDenseVector(Iterator decodedDocVector, BytesRef magnitudes, int numVectors, int dims) { + assert magnitudes.length == numVectors * Float.BYTES; + this.decodedDocVector = decodedDocVector; + this.magnitudes = magnitudes; + this.numVectors = numVectors; + this.dims = dims; + } + + @Override + public Iterator getVectors() { + return decodedDocVector; + } + + @Override + public float[] getMagnitudes() { + if (magnitudesArray == null) { + magnitudesArray = getMultiMagnitudes(magnitudes); + } + return magnitudesArray; + } + + @Override + public boolean isEmpty() { + return false; + } + + @Override + public int getDims() { + return dims; + } + + @Override + public int size() { + return numVectors; + } +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVectorDocValuesField.java new file mode 100644 index 0000000000000..356db58d989c5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVectorDocValuesField.java @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType; +import org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValues; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.FloatBuffer; +import java.util.Iterator; + +public class FloatMultiDenseVectorDocValuesField extends MultiDenseVectorDocValuesField { + + private final BinaryDocValues input; + private final BinaryDocValues magnitudes; + private boolean decoded; + private final int dims; + private BytesRef value; + private BytesRef magnitudesValue; + private FloatVectorIterator vectorValues; + private int numVectors; + private float[] buffer; + + public FloatMultiDenseVectorDocValuesField( + BinaryDocValues input, + BinaryDocValues magnitudes, + String name, + ElementType elementType, + int dims + ) { + super(name, elementType); + this.input = input; + this.magnitudes = magnitudes; + this.dims = dims; + this.buffer = new float[dims]; + } + + @Override + public void setNextDocId(int docId) throws IOException { + decoded = false; + if (input.advanceExact(docId)) { + boolean magnitudesFound = magnitudes.advanceExact(docId); + assert magnitudesFound; + + value = input.binaryValue(); + assert value.length % (Float.BYTES * dims) == 0; + numVectors = value.length / (Float.BYTES * dims); + magnitudesValue = magnitudes.binaryValue(); + assert magnitudesValue.length == (Float.BYTES * numVectors); + } else { + value = null; + magnitudesValue = null; + numVectors = 0; + } + } + + @Override + public MultiDenseVectorScriptDocValues toScriptDocValues() { + return new MultiDenseVectorScriptDocValues(this, dims); + } + + @Override + public boolean isEmpty() { + return value == null; + } + + @Override + public MultiDenseVector get() { + if (isEmpty()) { + return MultiDenseVector.EMPTY; + } + decodeVectorIfNecessary(); + return new FloatMultiDenseVector(vectorValues, magnitudesValue, numVectors, dims); + } + + @Override + public MultiDenseVector get(MultiDenseVector defaultValue) { + if (isEmpty()) { + return defaultValue; + } + decodeVectorIfNecessary(); + return new FloatMultiDenseVector(vectorValues, magnitudesValue, numVectors, dims); + } + + @Override + public MultiDenseVector getInternal() { + return get(null); + } + + @Override + public int size() { + return value == null ? 
0 : value.length / (Float.BYTES * dims); + } + + private void decodeVectorIfNecessary() { + if (decoded == false && value != null) { + vectorValues = new FloatVectorIterator(value, buffer, numVectors); + decoded = true; + } + } + + static class FloatVectorIterator implements Iterator { + private final float[] buffer; + private final FloatBuffer vectorValues; + private final int size; + private int idx = 0; + + FloatVectorIterator(BytesRef vectorValues, float[] buffer, int size) { + assert vectorValues.length == (buffer.length * Float.BYTES * size); + this.vectorValues = ByteBuffer.wrap(vectorValues.bytes, vectorValues.offset, vectorValues.length) + .order(ByteOrder.LITTLE_ENDIAN) + .asFloatBuffer(); + this.size = size; + this.buffer = buffer; + } + + @Override + public boolean hasNext() { + return idx < size; + } + + @Override + public float[] next() { + if (hasNext() == false) { + throw new IllegalArgumentException("No more elements in the iterator"); + } + vectorValues.get(buffer); + idx++; + return buffer; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVector.java new file mode 100644 index 0000000000000..85c851dbe545c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVector.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.script.field.vectors; + +import java.util.Iterator; + +public interface MultiDenseVector { + + default void checkDimensions(int qvDims) { + checkDimensions(getDims(), qvDims); + } + + Iterator getVectors(); + + float[] getMagnitudes(); + + boolean isEmpty(); + + int getDims(); + + int size(); + + static void checkDimensions(int dvDims, int qvDims) { + if (dvDims != qvDims) { + throw new IllegalArgumentException( + "The query vector has a different number of dimensions [" + qvDims + "] than the document vectors [" + dvDims + "]." 
+ ); + } + } + + private static String badQueryVectorType(Object queryVector) { + return "Cannot use vector [" + queryVector + "] with class [" + queryVector.getClass().getName() + "] as query vector"; + } + + MultiDenseVector EMPTY = new MultiDenseVector() { + public static final String MISSING_VECTOR_FIELD_MESSAGE = "Multi Dense vector value missing for a field," + " use isEmpty() to check for a missing vector value"; + + @Override + public Iterator<float[]> getVectors() { + throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); + } + + @Override + public float[] getMagnitudes() { + throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); + } + + @Override + public boolean isEmpty() { + return true; + } + + @Override + public int getDims() { + throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); + } + + @Override + public int size() { + return 0; + } + }; +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVectorDocValuesField.java new file mode 100644 index 0000000000000..61ae4304683c8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVectorDocValuesField.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.script.field.vectors; + +import org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValues; +import org.elasticsearch.script.field.AbstractScriptFieldFactory; +import org.elasticsearch.script.field.DocValuesScriptFieldFactory; +import org.elasticsearch.script.field.Field; + +import java.util.Iterator; + +import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType; + +public abstract class MultiDenseVectorDocValuesField extends AbstractScriptFieldFactory<MultiDenseVector> + implements + Field<MultiDenseVector>, + DocValuesScriptFieldFactory, + MultiDenseVectorScriptDocValues.MultiDenseVectorSupplier { + protected final String name; + protected final ElementType elementType; + + public MultiDenseVectorDocValuesField(String name, ElementType elementType) { + this.name = name; + this.elementType = elementType; + } + + @Override + public String getName() { + return name; + } + + public ElementType getElementType() { + return elementType; + } + + /** + * Get the MultiDenseVector for a document if one exists, MultiDenseVector.EMPTY otherwise + */ + public abstract MultiDenseVector get(); + + public abstract MultiDenseVector get(MultiDenseVector defaultValue); + + public abstract MultiDenseVectorScriptDocValues toScriptDocValues(); + + // MultiDenseVector fields are single valued, so Iterable does not make sense.
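To make the MultiDenseVector interface above concrete: a minimal consumption sketch that scores a float-element multi-vector field against a query vector, keeping the best cosine similarity. It uses only the methods shown above (isEmpty(), checkDimensions(), getVectors(), getMagnitudes()); note that the iterator implementations in this change reuse an internal buffer, so each float[] must be consumed before calling next() again. Illustrative only, assuming java.util.Iterator.

static float maxCosineSimilarity(MultiDenseVector field, float[] query) {
    if (field.isEmpty()) {
        return 0f;
    }
    field.checkDimensions(query.length);
    float queryMagnitude = 0f;
    for (float v : query) {
        queryMagnitude += v * v;
    }
    queryMagnitude = (float) Math.sqrt(queryMagnitude);
    float[] magnitudes = field.getMagnitudes(); // one precomputed magnitude per document vector
    java.util.Iterator<float[]> vectors = field.getVectors();
    float best = Float.NEGATIVE_INFINITY;
    for (int i = 0; vectors.hasNext(); i++) {
        float[] vector = vectors.next(); // NB: the backing buffer is reused between calls
        float dot = 0f;
        for (int d = 0; d < vector.length; d++) {
            dot += vector[d] * query[d];
        }
        best = Math.max(best, dot / (magnitudes[i] * queryMagnitude));
    }
    return best;
}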
+ @Override + public Iterator iterator() { + throw new UnsupportedOperationException("Cannot iterate over single valued multi_dense_vector field, use get() instead"); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index bdefee988248f..a1e8eb25f4780 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -12,6 +12,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.DelayableWriteable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -22,8 +23,8 @@ import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.geometry.utils.Geohash; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; import java.io.IOException; @@ -260,7 +261,7 @@ private DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resol this.formatSortValues = formatSortValues; } - public DateTime(StreamInput in) throws IOException { + private DateTime(StreamInput in) throws IOException { String formatterPattern = in.readString(); Locale locale = in.getTransportVersion().onOrAfter(TransportVersions.DATE_TIME_DOC_VALUES_LOCALES) ? LocaleUtils.parse(in.readString()) @@ -285,6 +286,14 @@ public String getWriteableName() { return NAME; } + public static DateTime readFrom(StreamInput in) throws IOException { + final DateTime dateTime = new DateTime(in); + if (in instanceof DelayableWriteable.Deduplicator d) { + return d.deduplicate(dateTime); + } + return dateTime; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); @@ -528,7 +537,7 @@ public Decimal(String pattern) { this.format = new DecimalFormat(pattern, SYMBOLS); } - public Decimal(StreamInput in) throws IOException { + private Decimal(StreamInput in) throws IOException { this(in.readString()); } @@ -537,6 +546,14 @@ public String getWriteableName() { return NAME; } + public static Decimal readFrom(StreamInput in) throws IOException { + final Decimal decimal = new Decimal(in); + if (in instanceof DelayableWriteable.Deduplicator d) { + return d.deduplicate(decimal); + } + return decimal; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(pattern); @@ -729,7 +746,7 @@ public Object format(BytesRef value) { try { // NOTE: if the tsid is a map of dimension key/value pairs (as it was before introducing // tsid hashing) we just decode the map and return it. 
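Why the DateTime and Decimal constructors above became private: a DocValueFormat instance arrives with every shard response, and most of them are identical. The readFrom indirection lets a deserializing stream that implements DelayableWriteable.Deduplicator (as used when shard-level results are held in wire format until reduction) hand back one shared instance instead of many equal copies. A sketch of the same pattern for a hypothetical MyFormat writeable; deduplication relies on the value type implementing equals()/hashCode().

public static MyFormat readFrom(StreamInput in) throws IOException {
    final MyFormat format = new MyFormat(in.readString()); // constructor kept private in the real pattern
    if (in instanceof DelayableWriteable.Deduplicator deduplicator) {
        // Returns a previously seen equal instance, if any, instead of the fresh copy.
        return deduplicator.deduplicate(format);
    }
    return format;
}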
- return TimeSeriesIdFieldMapper.decodeTsidAsMap(value); + return RoutingPathFields.decodeAsMap(value); } catch (Exception e) { // NOTE: otherwise the _tsid field is just a hash and we can't decode it return TimeSeriesIdFieldMapper.encodeTsid(value); @@ -760,20 +777,20 @@ private BytesRef parseBytesRefMap(Object value) { } Map m = (Map) value; - TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null); + RoutingPathFields routingPathFields = new RoutingPathFields(null); for (Map.Entry entry : m.entrySet()) { String f = entry.getKey().toString(); Object v = entry.getValue(); if (v instanceof String s) { - builder.addString(f, s); + routingPathFields.addString(f, s); } else if (v instanceof Long l) { - builder.addLong(f, l); + routingPathFields.addLong(f, l); } else if (v instanceof Integer i) { - builder.addLong(f, i.longValue()); + routingPathFields.addLong(f, i.longValue()); } else if (v instanceof BigInteger ul) { long ll = UNSIGNED_LONG_SHIFTED.parseLong(ul.toString(), false, () -> 0L); - builder.addUnsignedLong(f, ll); + routingPathFields.addUnsignedLong(f, ll); } else { throw new IllegalArgumentException("Unexpected value in tsid object [" + v + "]"); } @@ -781,7 +798,7 @@ private BytesRef parseBytesRefMap(Object value) { try { // NOTE: we can decode the tsid only if it is not hashed (represented as a map) - return builder.buildLegacyTsid().toBytesRef(); + return TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); } catch (IOException e) { throw new IllegalArgumentException(e); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index fd39a95bdb75d..b8f50c6f9a62f 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -52,6 +52,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.index.query.RegexpQueryBuilder; import org.elasticsearch.index.query.ScriptQueryBuilder; import org.elasticsearch.index.query.SimpleQueryStringBuilder; @@ -238,7 +239,6 @@ import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.retriever.StandardRetrieverBuilder; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.GeoDistanceSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; @@ -1013,8 +1013,8 @@ private void registerScoreFunction(ScoreFunctionSpec scoreFunction) { private void registerValueFormats() { registerValueFormat(DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN); - registerValueFormat(DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new); - registerValueFormat(DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new); + registerValueFormat(DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::readFrom); + registerValueFormat(DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::readFrom); registerValueFormat(DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH); registerValueFormat(DocValueFormat.GEOTILE.getWriteableName(), in -> DocValueFormat.GEOTILE); registerValueFormat(DocValueFormat.IP.getWriteableName(), in -> 
DocValueFormat.IP); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index be96b4e25d841..a11c4013a9c9b 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1285,13 +1285,17 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc ); if (query != null) { QueryBuilder rewrittenForInnerHits = Rewriteable.rewrite(query, innerHitsRewriteContext, true); - InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); + if (false == source.skipInnerHits()) { + InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); + } searchExecutionContext.setAliasFilter(context.request().getAliasFilter().getQueryBuilder()); context.parsedQuery(searchExecutionContext.toQuery(query)); } if (source.postFilter() != null) { QueryBuilder rewrittenForInnerHits = Rewriteable.rewrite(source.postFilter(), innerHitsRewriteContext, true); - InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); + if (false == source.skipInnerHits()) { + InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); + } context.parsedPostFilter(searchExecutionContext.toQuery(source.postFilter())); } if (innerHitBuilders.size() > 0) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 38cab1761d409..b829afb0c23b0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -35,7 +35,6 @@ */ public abstract class InternalAggregation implements Aggregation, NamedWriteable { protected final String name; - protected final Map metadata; /** @@ -53,12 +52,14 @@ protected InternalAggregation(String name, Map metadata) { */ protected InternalAggregation(StreamInput in) throws IOException { final String name = in.readString(); + final Map metadata = in.readGenericMap(); if (in instanceof DelayableWriteable.Deduplicator d) { this.name = d.deduplicate(name); + this.metadata = metadata == null || metadata.isEmpty() ? metadata : d.deduplicate(metadata); } else { this.name = name; + this.metadata = metadata; } - metadata = in.readGenericMap(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index cb5e841a3df77..098a2b2f45d2f 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -214,6 +214,8 @@ public static HighlightBuilder highlight() { private Map runtimeMappings = emptyMap(); + private boolean skipInnerHits = false; + /** * Constructs a new search source builder. 
*/ @@ -290,6 +292,12 @@ public SearchSourceBuilder(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { rankBuilder = in.readOptionalNamedWriteable(RankBuilder.class); } + if (in.getTransportVersion().isPatchFrom(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16) + || in.getTransportVersion().onOrAfter(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE)) { + skipInnerHits = in.readBoolean(); + } else { + skipInnerHits = false; + } } @Override @@ -379,6 +387,10 @@ public void writeTo(StreamOutput out) throws IOException { } else if (rankBuilder != null) { throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion().toReleaseVersion() + "]"); } + if (out.getTransportVersion().isPatchFrom(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16) + || out.getTransportVersion().onOrAfter(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE)) { + out.writeBoolean(skipInnerHits); + } } /** @@ -1280,6 +1292,7 @@ private SearchSourceBuilder shallowCopy( rewrittenBuilder.collapse = collapse; rewrittenBuilder.pointInTimeBuilder = pointInTimeBuilder; rewrittenBuilder.runtimeMappings = runtimeMappings; + rewrittenBuilder.skipInnerHits = skipInnerHits; return rewrittenBuilder; } @@ -1850,6 +1863,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public SearchSourceBuilder skipInnerHits(boolean skipInnerHits) { + this.skipInnerHits = skipInnerHits; + return this; + } + + public boolean skipInnerHits() { + return this.skipInnerHits; + } + public static class IndexBoost implements Writeable, ToXContentObject { private final String index; private final float boost; @@ -2104,7 +2126,8 @@ public int hashCode() { collapse, trackTotalHitsUpTo, pointInTimeBuilder, - runtimeMappings + runtimeMappings, + skipInnerHits ); } @@ -2149,7 +2172,8 @@ public boolean equals(Object obj) { && Objects.equals(collapse, other.collapse) && Objects.equals(trackTotalHitsUpTo, other.trackTotalHitsUpTo) && Objects.equals(pointInTimeBuilder, other.pointInTimeBuilder) - && Objects.equals(runtimeMappings, other.runtimeMappings); + && Objects.equals(runtimeMappings, other.runtimeMappings) + && Objects.equals(skipInnerHits, other.skipInnerHits); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 6bc667d4359b1..546586a9ff3c3 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -191,7 +191,16 @@ protected SearchHit nextDoc(int doc) throws IOException { } }; - SearchHit[] hits = docsIterator.iterate(context.shardTarget(), context.searcher().getIndexReader(), docIdsToLoad); + SearchHit[] hits = docsIterator.iterate( + context.shardTarget(), + context.searcher().getIndexReader(), + docIdsToLoad, + context.request().allowPartialSearchResults() + ); + + if (docsIterator.isTimedOut()) { + context.queryResult().searchTimedOut(true); + } if (context.isCancelled()) { for (SearchHit hit : hits) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java index 682ee4b375668..df4e7649ffd3b 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java +++ 
b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java @@ -13,7 +13,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.query.SearchTimeoutException; import java.io.IOException; import java.util.Arrays; @@ -27,6 +30,12 @@ */ abstract class FetchPhaseDocsIterator { + private boolean timedOut = false; + + public boolean isTimedOut() { + return timedOut; + } + /** * Called when a new leaf reader is reached * @param ctx the leaf reader for this set of doc ids @@ -44,7 +53,7 @@ abstract class FetchPhaseDocsIterator { /** * Iterate over a set of docsIds within a particular shard and index reader */ - public final SearchHit[] iterate(SearchShardTarget shardTarget, IndexReader indexReader, int[] docIds) { + public final SearchHit[] iterate(SearchShardTarget shardTarget, IndexReader indexReader, int[] docIds, boolean allowPartialResults) { SearchHit[] searchHits = new SearchHit[docIds.length]; DocIdToIndex[] docs = new DocIdToIndex[docIds.length]; for (int index = 0; index < docIds.length; index++) { @@ -58,30 +67,55 @@ public final SearchHit[] iterate(SearchShardTarget shardTarget, IndexReader inde LeafReaderContext ctx = indexReader.leaves().get(leafOrd); int endReaderIdx = endReaderIdx(ctx, 0, docs); int[] docsInLeaf = docIdsInLeaf(0, endReaderIdx, docs, ctx.docBase); - setNextReader(ctx, docsInLeaf); - for (int i = 0; i < docs.length; i++) { - if (i >= endReaderIdx) { - leafOrd = ReaderUtil.subIndex(docs[i].docId, indexReader.leaves()); - ctx = indexReader.leaves().get(leafOrd); - endReaderIdx = endReaderIdx(ctx, i, docs); - docsInLeaf = docIdsInLeaf(i, endReaderIdx, docs, ctx.docBase); - setNextReader(ctx, docsInLeaf); + try { + setNextReader(ctx, docsInLeaf); + } catch (ContextIndexSearcher.TimeExceededException timeExceededException) { + if (allowPartialResults) { + timedOut = true; + return SearchHits.EMPTY; } - currentDoc = docs[i].docId; - assert searchHits[docs[i].index] == null; - searchHits[docs[i].index] = nextDoc(docs[i].docId); + throw new SearchTimeoutException(shardTarget, "Time exceeded"); } - } catch (Exception e) { - for (SearchHit searchHit : searchHits) { - if (searchHit != null) { - searchHit.decRef(); + for (int i = 0; i < docs.length; i++) { + try { + if (i >= endReaderIdx) { + leafOrd = ReaderUtil.subIndex(docs[i].docId, indexReader.leaves()); + ctx = indexReader.leaves().get(leafOrd); + endReaderIdx = endReaderIdx(ctx, i, docs); + docsInLeaf = docIdsInLeaf(i, endReaderIdx, docs, ctx.docBase); + setNextReader(ctx, docsInLeaf); + } + currentDoc = docs[i].docId; + assert searchHits[docs[i].index] == null; + searchHits[docs[i].index] = nextDoc(docs[i].docId); + } catch (ContextIndexSearcher.TimeExceededException timeExceededException) { + if (allowPartialResults) { + timedOut = true; + SearchHit[] partialSearchHits = new SearchHit[i]; + System.arraycopy(searchHits, 0, partialSearchHits, 0, i); + return partialSearchHits; + } + purgeSearchHits(searchHits); + throw new SearchTimeoutException(shardTarget, "Time exceeded"); } } + } catch (SearchTimeoutException e) { + throw e; + } catch (Exception e) { + purgeSearchHits(searchHits); throw new FetchPhaseExecutionException(shardTarget, "Error running fetch phase for doc [" + currentDoc + "]", e); } return searchHits; } + 
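To summarize the new iterate() contract above: when a ContextIndexSearcher.TimeExceededException surfaces mid-fetch, the iterator either truncates (partial results allowed: the timedOut flag is set so FetchPhase can mark queryResult().searchTimedOut(true)) or releases every hit collected so far and rethrows as SearchTimeoutException. A condensed sketch of that control flow, with the leaf-reader bookkeeping elided and java.util.Arrays assumed:

// Condensed view; the real method also advances leaves via setNextReader() as doc ids cross segments.
SearchHit[] iterateSketch(SearchShardTarget shardTarget, int[] docIds, boolean allowPartialResults) {
    SearchHit[] hits = new SearchHit[docIds.length];
    for (int i = 0; i < docIds.length; i++) {
        try {
            hits[i] = nextDoc(docIds[i]);
        } catch (ContextIndexSearcher.TimeExceededException e) {
            if (allowPartialResults) {
                timedOut = true;
                return Arrays.copyOf(hits, i); // keep the docs fetched before the timeout
            }
            purgeSearchHits(hits); // release hit refs before propagating
            throw new SearchTimeoutException(shardTarget, "Time exceeded");
        }
    }
    return hits;
}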
private static void purgeSearchHits(SearchHit[] searchHits) { + for (SearchHit searchHit : searchHits) { + if (searchHit != null) { + searchHit.decRef(); + } + } + } + private static int endReaderIdx(LeafReaderContext currentReaderContext, int index, DocIdToIndex[] docs) { int firstInNextReader = currentReaderContext.docBase + currentReaderContext.reader().maxDoc(); int i = index + 1; diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index b15798db95b6f..db839de9f573a 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -236,7 +236,7 @@ public int doHashCode() { return Objects.hash(innerRetrievers); } - protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { + protected final SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { var sourceBuilder = new SearchSourceBuilder().pointInTimeBuilder(pit) .trackTotalHits(false) .storedFields(new StoredFieldsContext(false)) @@ -254,6 +254,11 @@ protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, } sortBuilders.add(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); sourceBuilder.sort(sortBuilders); + sourceBuilder.skipInnerHits(true); + return finalizeSourceBuilder(sourceBuilder); + } + + protected SearchSourceBuilder finalizeSourceBuilder(SearchSourceBuilder sourceBuilder) { return sourceBuilder; } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java index facda1a30a5ac..8be9a78dae154 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java @@ -15,8 +15,8 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.search.vectors.QueryVectorBuilder; diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java index 535db5c8fe28e..02f890f51d011 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java @@ -12,9 +12,9 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; 
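With createSearchSourceBuilder() now final, finalizeSourceBuilder() is the single point where a compound retriever can adjust the per-inner-retriever source after the common setup (point-in-time, shard-doc tiebreak sort, skipInnerHits(true)). A hypothetical subclass would hook in as below, assuming the self-typed generic parameter; the constructor and the other abstract members a real retriever must implement are elided.

class MyCompoundRetrieverBuilder extends CompoundRetrieverBuilder<MyCompoundRetrieverBuilder> {
    // ... constructor and remaining abstract members omitted for brevity ...

    @Override
    protected SearchSourceBuilder finalizeSourceBuilder(SearchSourceBuilder sourceBuilder) {
        // Runs last, so the PIT, sort and inner-hits handling set up by
        // createSearchSourceBuilder() stay intact; only per-retriever tweaks belong here.
        return sourceBuilder.size(100); // e.g. widen the per-retriever window
    }
}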
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java index 2cb960e7e73cb..ebbdf58cc8c4f 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java @@ -283,7 +283,7 @@ private static int[] findSegmentStarts(IndexReader reader, RankDoc[] docs) { return starts; } - RankDoc[] rankDocs() { + public RankDoc[] rankDocs() { return docs; } diff --git a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java index a97d22a976631..32634043cfc98 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java +++ b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java @@ -68,7 +68,7 @@ public Map getBuilders(Settings settings, int allocated settings, ThreadPool.Names.SEARCH, searchOrGetThreadPoolSize, - 1000, + searchOrGetThreadPoolSize * 1000, new EsExecutors.TaskTrackingConfig(true, searchAutoscalingEWMA) ) ); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 0155ab34ae637..f55e3740aaa8f 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.TimeProvider; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.SizeValue; @@ -65,7 +66,7 @@ * Manages all the Java thread pools we create. {@link Names} contains a list of the thread pools, but plugins can dynamically add more * thread pools to instantiate. */ -public class ThreadPool implements ReportingService, Scheduler { +public class ThreadPool implements ReportingService, Scheduler, TimeProvider { private static final Logger logger = LogManager.getLogger(ThreadPool.class); @@ -362,12 +363,7 @@ protected ThreadPool() { this.scheduler = null; } - /** - * Returns a value of milliseconds that may be used for relative time calculations. - * - * This method should only be used for calculating time deltas. For an epoch based - * timestamp, see {@link #absoluteTimeInMillis()}. - */ + @Override public long relativeTimeInMillis() { return cachedTimeThread.relativeTimeInMillis(); } @@ -379,37 +375,17 @@ public LongSupplier relativeTimeInMillisSupplier() { return relativeTimeInMillisSupplier; } - /** - * Returns a value of nanoseconds that may be used for relative time calculations. - * - * This method should only be used for calculating time deltas. For an epoch based - * timestamp, see {@link #absoluteTimeInMillis()}. - */ + @Override public long relativeTimeInNanos() { return cachedTimeThread.relativeTimeInNanos(); } - /** - * Returns a value of milliseconds that may be used for relative time calculations. Similar to {@link #relativeTimeInMillis()} except - * that this method is more expensive: the return value is computed directly from {@link System#nanoTime} and is not cached. 
You should - * use {@link #relativeTimeInMillis()} unless the extra accuracy offered by this method is worth the costs. - * - * When computing a time interval by comparing relative times in milliseconds, you should make sure that both endpoints use cached - * values returned from {@link #relativeTimeInMillis()} or that they both use raw values returned from this method. It doesn't really - * make sense to compare a raw value to a cached value, even if in practice the result of such a comparison will be approximately - * sensible. - */ + @Override public long rawRelativeTimeInMillis() { return TimeValue.nsecToMSec(System.nanoTime()); } - /** - * Returns the value of milliseconds since UNIX epoch. - * - * This method should only be used for exact date/time formatting. For calculating - * time deltas that should not suffer from negative deltas, which are possible with - * this method, see {@link #relativeTimeInMillis()}. - */ + @Override public long absoluteTimeInMillis() { return cachedTimeThread.absoluteTimeInMillis(); } diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index 5cd8935f72403..37b9b5836ca5f 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -7,6 +7,7 @@ # License v3.0 only", or the "Server Side Public License, v 1". # +org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures org.elasticsearch.action.bulk.BulkFeatures org.elasticsearch.features.FeatureInfrastructureFeatures org.elasticsearch.health.HealthFeatures diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index b0ef5b780e775..ba575cc642a81 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -131,3 +131,5 @@ 8.15.1,8702002 8.15.2,8702003 8.15.3,8702003 +8.15.4,8702003 +8.16.0,8772001 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index e3681cc975988..c54aea88613f5 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -131,3 +131,5 @@ 8.15.1,8512000 8.15.2,8512000 8.15.3,8512000 +8.15.4,8512000 +8.16.0,8518000 diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java index bb4aa9beeb42e..3dafc8f000f3f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java @@ -101,7 +101,7 @@ public void testDeleteDesiredBalance() throws Exception { var clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings); var delegate = new BalancedShardsAllocator(); - var computer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegate) { + var computer = new DesiredBalanceComputer(clusterSettings, threadPool, 
delegate) { final AtomicReference lastComputationInput = new AtomicReference<>(); @@ -122,7 +122,8 @@ public DesiredBalance compute( clusterService, computer, (state, action) -> state, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = new MockAllocationService( randomAllocationDeciders(settings, clusterSettings), diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java index 0b9cba837583d..5cf7b438b41ab 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java @@ -42,6 +42,7 @@ import java.io.IOException; import java.io.Reader; +import java.util.Arrays; import java.util.List; import java.util.Map; @@ -250,6 +251,32 @@ public void testFillsAttributes() throws IOException { assertEquals("", tokens.get(3).getType()); } + public void testAnalyzerWithTwoTextsAndNoIndexName() throws IOException { + AnalyzeAction.Request request = new AnalyzeAction.Request(); + + for (String analyzer : Arrays.asList("standard", "simple", "stop", "keyword", "whitespace", "classic")) { + request.analyzer(analyzer); + request.text("a a", "b b"); + + AnalyzeAction.Response analyzeIndex = TransportAnalyzeAction.analyze(request, registry, mockIndexService(), maxTokenCount); + List tokensIndex = analyzeIndex.getTokens(); + + AnalyzeAction.Response analyzeNoIndex = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount); + List tokensNoIndex = analyzeNoIndex.getTokens(); + + assertEquals(tokensIndex.size(), tokensNoIndex.size()); + for (int i = 0; i < tokensIndex.size(); i++) { + AnalyzeAction.AnalyzeToken withIndex = tokensIndex.get(i); + AnalyzeAction.AnalyzeToken withNoIndex = tokensNoIndex.get(i); + + assertEquals(withIndex.getStartOffset(), withNoIndex.getStartOffset()); + assertEquals(withIndex.getEndOffset(), withNoIndex.getEndOffset()); + assertEquals(withIndex.getPosition(), withNoIndex.getPosition()); + assertEquals(withIndex.getType(), withNoIndex.getType()); + } + } + } + public void testWithIndexAnalyzers() throws IOException { AnalyzeAction.Request request = new AnalyzeAction.Request(); request.text("the quick brown fox"); diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 32297e0c09b8f..9d74c2069ec10 100644 --- a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -128,6 +128,12 @@ public void testAutoGenerateId() { assertTrue("expected > 0 but got: " + request.getAutoGeneratedTimestamp(), request.getAutoGeneratedTimestamp() > 0); } + public void testAutoGenerateTimeBasedId() { + IndexRequest request = new IndexRequest("index"); + request.autoGenerateTimeBasedId(); + assertTrue("expected > 0 but got: " + request.getAutoGeneratedTimestamp(), request.getAutoGeneratedTimestamp() > 0); + } + public void testIndexResponse() { ShardId shardId = new ShardId(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), randomIntBetween(0, 1000)); String id = randomAlphaOfLengthBetween(3, 10); diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java 
b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 762a7e0f47cab..dda20dfb37e9d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -8,35 +8,65 @@ */ package org.elasticsearch.action.search; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.store.MockDirectoryWrapper; +import org.apache.lucene.util.Accountable; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.mapper.IdLoader; +import org.elasticsearch.index.mapper.MapperMetrics; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.fetch.FetchSubPhaseProcessor; import org.elasticsearch.search.fetch.QueryFetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.profile.ProfileResult; import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.SearchTimeoutException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; +import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.transport.Transport; +import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -749,4 +779,159 @@ private static void 
addProfiling(boolean profiled, QuerySearchResult queryResult private static ProfileResult fetchProfile(boolean profiled) { return profiled ? new ProfileResult("fetch", "fetch", Map.of(), Map.of(), FETCH_PROFILE_TIME, List.of()) : null; } + + public void testFetchTimeoutWithPartialResults() throws IOException { + Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + w.addDocument(new Document()); + w.addDocument(new Document()); + w.addDocument(new Document()); + IndexReader r = w.getReader(); + w.close(); + ContextIndexSearcher contextIndexSearcher = createSearcher(r); + try (SearchContext searchContext = createSearchContext(contextIndexSearcher, true)) { + FetchPhase fetchPhase = createFetchPhase(contextIndexSearcher); + fetchPhase.execute(searchContext, new int[] { 0, 1, 2 }, null); + assertTrue(searchContext.queryResult().searchTimedOut()); + assertEquals(1, searchContext.fetchResult().hits().getHits().length); + } finally { + r.close(); + dir.close(); + } + } + + public void testFetchTimeoutNoPartialResults() throws IOException { + Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + w.addDocument(new Document()); + w.addDocument(new Document()); + w.addDocument(new Document()); + IndexReader r = w.getReader(); + w.close(); + ContextIndexSearcher contextIndexSearcher = createSearcher(r); + + try (SearchContext searchContext = createSearchContext(contextIndexSearcher, false)) { + FetchPhase fetchPhase = createFetchPhase(contextIndexSearcher); + expectThrows(SearchTimeoutException.class, () -> fetchPhase.execute(searchContext, new int[] { 0, 1, 2 }, null)); + assertNull(searchContext.fetchResult().hits()); + } finally { + r.close(); + dir.close(); + } + } + + private static ContextIndexSearcher createSearcher(IndexReader reader) throws IOException { + return new ContextIndexSearcher(reader, null, null, new QueryCachingPolicy() { + @Override + public void onUse(Query query) {} + + @Override + public boolean shouldCache(Query query) { + return false; + } + }, randomBoolean()); + } + + private static FetchPhase createFetchPhase(ContextIndexSearcher contextIndexSearcher) { + return new FetchPhase(Collections.singletonList(fetchContext -> new FetchSubPhaseProcessor() { + boolean processCalledOnce = false; + + @Override + public void setNextReader(LeafReaderContext readerContext) {} + + @Override + public void process(FetchSubPhase.HitContext hitContext) { + // we throw only once one doc has been fetched, so we can test partial results are returned + if (processCalledOnce) { + contextIndexSearcher.throwTimeExceededException(); + } else { + processCalledOnce = true; + } + } + + @Override + public StoredFieldsSpec storedFieldsSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } + })); + } + + private static SearchContext createSearchContext(ContextIndexSearcher contextIndexSearcher, boolean allowPartialResults) { + IndexSettings indexSettings = new IndexSettings( + IndexMetadata.builder("index") + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(0) + .creationDate(System.currentTimeMillis()) + .build(), + Settings.EMPTY + ); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + + } + }); + + 
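//
// Reviewer sketch (illustrative, not part of this patch): the two timeout tests
// above pin down FetchPhase behaviour when a fetch sub-phase runs out of time.
// The decision being exercised is roughly the following, where the exception
// type is the SearchTimeoutException imported at the top of this file, and the
// constructor arguments and message are assumptions for illustration:
//
//     if (searchContext.request().allowPartialSearchResults() == false) {
//         throw new SearchTimeoutException(shardTarget, "Time exceeded");
//     }
//     searchContext.queryResult().searchTimedOut(true); // keep the hits fetched so far
//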
SearchExecutionContext searchExecutionContext = new SearchExecutionContext( + 0, + 0, + indexSettings, + bitsetFilterCache, + null, + null, + MappingLookup.EMPTY, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + Collections.emptyMap(), + null, + MapperMetrics.NOOP + ); + TestSearchContext searchContext = new TestSearchContext(searchExecutionContext, null, contextIndexSearcher) { + private final FetchSearchResult fetchSearchResult = new FetchSearchResult(); + private final ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(allowPartialResults), + new ShardId("index", "indexUUID", 0), + 0, + 1, + AliasFilter.EMPTY, + 1f, + 0L, + null + ); + + @Override + public IdLoader newIdLoader() { + return new IdLoader.StoredIdLoader(); + } + + @Override + public FetchSearchResult fetchResult() { + return fetchSearchResult; + } + + @Override + public ShardSearchRequest request() { + return request; + } + }; + searchContext.addReleasable(searchContext.fetchResult()::decRef); + searchContext.setTask(new SearchShardTask(-1, "type", "action", "description", null, Collections.emptyMap())); + return searchContext; + } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 526961d74bf52..0c11123960622 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -16,12 +16,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; @@ -105,23 +102,6 @@ public void testSerialization() throws Exception { assertNotSame(deserializedRequest, searchRequest); } - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // this can be removed when the affected transport version constants are collapsed - public void testSerializationConstants() throws Exception { - SearchRequest searchRequest = createSearchRequest(); - - // something serialized with previous version to remove, should read correctly with the reversion - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setTransportVersion(TransportVersionUtils.getPreviousVersion(TransportVersions.REMOVE_MIN_COMPATIBLE_SHARD_NODE)); - searchRequest.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - in.setTransportVersion(TransportVersions.REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE); - SearchRequest copiedRequest = new SearchRequest(in); - assertEquals(copiedRequest, searchRequest); - assertEquals(copiedRequest.hashCode(), searchRequest.hashCode()); - } - } - } - public void testSerializationMultiKNN() throws Exception { SearchRequest searchRequest = createSearchRequest(); if (searchRequest.source() == null) { diff --git 
a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 70682cfd41d82..a9de118c6b859 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -1765,7 +1765,6 @@ protected void doWriteTo(StreamOutput out) throws IOException { new IndexNameExpressionResolver(threadPool.getThreadContext(), EmptySystemIndices.INSTANCE), null, null, - new SearchTransportAPMMetrics(TelemetryProvider.NOOP.getMeterRegistry()), new SearchResponseMetrics(TelemetryProvider.NOOP.getMeterRegistry()), client, new UsageService() diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java index 0cb4a56794c22..a1b9c59571496 100644 --- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java @@ -26,7 +26,7 @@ public void testEmptyRequestBody() throws Exception { .withParams(Map.of("synonymsSet", "testSet", "synonymRuleId", "testRule")) .build(); - FakeRestChannel channel = new FakeRestChannel(request, false, 0); + FakeRestChannel channel = new FakeRestChannel(request, true, 0); try (var threadPool = createThreadPool()) { final var nodeClient = new NoOpNodeClient(threadPool); expectThrows(IllegalArgumentException.class, () -> action.handleRequest(request, channel, nodeClient)); diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java index 54dff48788f52..4dce73fcf0e89 100644 --- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java @@ -26,7 +26,7 @@ public void testEmptyRequestBody() throws Exception { .withParams(Map.of("synonymsSet", "test")) .build(); - FakeRestChannel channel = new FakeRestChannel(request, false, 0); + FakeRestChannel channel = new FakeRestChannel(request, true, 0); try (var threadPool = createThreadPool()) { final var nodeClient = new NoOpNodeClient(threadPool); expectThrows(IllegalArgumentException.class, () -> action.handleRequest(request, channel, nodeClient)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java index 69e6983e16381..0efa576a0cddc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java @@ -80,7 +80,12 @@ public void testShardStats() { var queue = new DeterministicTaskQueue(); try (var clusterService = ClusterServiceUtils.createClusterService(state, queue.getThreadPool())) { - var service = new AllocationStatsService(clusterService, () -> clusterInfo, createShardAllocator(), TEST_WRITE_LOAD_FORECASTER); + var service = new AllocationStatsService( + clusterService, + () -> clusterInfo, + createShardAllocator(), + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) + ); assertThat( service.stats(), allOf( @@ -120,7 +125,7 @@ 
public void testRelocatingShardIsOnlyCountedOnceOnTargetNode() { clusterService, EmptyClusterInfoService.INSTANCE, createShardAllocator(), - TEST_WRITE_LOAD_FORECASTER + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) ); assertThat( service.stats(), @@ -163,7 +168,8 @@ public void testUndesiredShardCount() { threadPool, clusterService, (innerState, strategy) -> innerState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { @Override public DesiredBalance getDesiredBalance() { @@ -176,7 +182,7 @@ public DesiredBalance getDesiredBalance() { ); } }, - TEST_WRITE_LOAD_FORECASTER + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) ); assertThat( service.stats(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java index 44f3b7d1d3a11..c5ae771199541 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java @@ -490,7 +490,8 @@ private Map.Entry createNewAllocationSer clusterService, (clusterState, routingAllocationAction) -> strategyRef.get() .executeWithRoutingAllocation(clusterState, "reconcile-desired-balance", routingAllocationAction), - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { @Override public void allocate(RoutingAllocation allocation, ActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index 51401acabb0ac..7b77947792bd4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -42,6 +42,8 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.TimeProvider; +import org.elasticsearch.common.time.TimeProviderUtils; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.IndexVersion; @@ -1203,42 +1205,40 @@ public void testShouldLogComputationIteration() { private void checkIterationLogging(int iterations, long eachIterationDuration, MockLog.AbstractEventExpectation expectation) { var currentTime = new AtomicLong(0L); + TimeProvider timeProvider = TimeProviderUtils.create(() -> currentTime.addAndGet(eachIterationDuration)); + // Some runs of this test try to simulate a long desired balance computation. Setting a high value on the following setting // prevents interrupting a long computation. 
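//
// Reviewer sketch (illustrative, not part of this patch): DesiredBalanceComputer
// now takes a TimeProvider rather than a bare LongSupplier, and the new
// TimeProviderUtils.create(...) added later in this diff adapts one for tests:
//
//     AtomicLong fakeClock = new AtomicLong();
//     TimeProvider timeProvider = TimeProviderUtils.create(fakeClock::get);
//     fakeClock.addAndGet(100); // every relative/absolute read now sees +100ms
//
// The test below goes one step further and advances the clock inside the
// supplier itself, so each computation iteration observes one fixed delta.
//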
var clusterSettings = createBuiltInClusterSettings( Settings.builder().put(DesiredBalanceComputer.MAX_BALANCE_COMPUTATION_TIME_DURING_INDEX_CREATION_SETTING.getKey(), "2m").build() ); - var desiredBalanceComputer = new DesiredBalanceComputer( - clusterSettings, - () -> currentTime.addAndGet(eachIterationDuration), - new ShardsAllocator() { - @Override - public void allocate(RoutingAllocation allocation) { - final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); - while (unassignedIterator.hasNext()) { - final var shardRouting = unassignedIterator.next(); - if (shardRouting.primary()) { - unassignedIterator.initialize("node-0", null, 0L, allocation.changes()); - } else { - unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes()); - } - } - - // move shard on each iteration - for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) { - allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes()); - } - for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) { - allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes()); + var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, timeProvider, new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); + while (unassignedIterator.hasNext()) { + final var shardRouting = unassignedIterator.next(); + if (shardRouting.primary()) { + unassignedIterator.initialize("node-0", null, 0L, allocation.changes()); + } else { + unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes()); } } - @Override - public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { - throw new AssertionError("only used for allocation explain"); + // move shard on each iteration + for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) { + allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes()); + } + for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) { + allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes()); } } - ); + + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + throw new AssertionError("only used for allocation explain"); + } + }); assertThatLogger(() -> { var iteration = new AtomicInteger(0); @@ -1346,7 +1346,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } private static DesiredBalanceComputer createDesiredBalanceComputer(ShardsAllocator allocator) { - return new DesiredBalanceComputer(createBuiltInClusterSettings(), () -> 0L, allocator); + return new DesiredBalanceComputer(createBuiltInClusterSettings(), TimeProviderUtils.create(() -> 0L), allocator); } private static void assertDesiredAssignments(DesiredBalance desiredBalance, Map expected) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java index 85dc5c9dcd6a9..9e6e080f38216 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java @@ -27,7 +27,7 @@ public void testZeroAllMetrics() { long unassignedShards = randomNonNegativeLong(); long totalAllocations = randomNonNegativeLong(); long undesiredAllocations = randomNonNegativeLong(); - metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of()); + metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of(), Map.of()); assertEquals(totalAllocations, metrics.totalAllocations()); assertEquals(unassignedShards, metrics.unassignedShards()); assertEquals(undesiredAllocations, metrics.undesiredAllocations()); @@ -44,7 +44,7 @@ public void testMetricsAreOnlyPublishedWhenNodeIsMaster() { long unassignedShards = randomNonNegativeLong(); long totalAllocations = randomLongBetween(100, 10000000); long undesiredAllocations = randomLongBetween(0, totalAllocations); - metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of()); + metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of(), Map.of()); // Collect when not master meterRegistry.getRecorder().collect(); @@ -104,7 +104,7 @@ public void testUndesiredAllocationRatioIsZeroWhenTotalShardsIsZero() { RecordingMeterRegistry meterRegistry = new RecordingMeterRegistry(); DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(meterRegistry); long unassignedShards = randomNonNegativeLong(); - metrics.updateMetrics(new AllocationStats(unassignedShards, 0, 0), Map.of()); + metrics.updateMetrics(new AllocationStats(unassignedShards, 0, 0), Map.of(), Map.of()); metrics.setNodeIsMaster(true); meterRegistry.getRecorder().collect(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index b5f44ee9e505f..54f4f0ffb6e15 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -1214,7 +1214,8 @@ public void testRebalanceDoesNotCauseHotSpots() { var reconciler = new DesiredBalanceReconciler( clusterSettings, new DeterministicTaskQueue().getThreadPool(), - DesiredBalanceMetrics.NOOP + DesiredBalanceMetrics.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var totalOutgoingMoves = new HashMap(); @@ -1296,7 +1297,12 @@ public void testShouldLogOnTooManyUndesiredAllocations() { final var timeInMillisSupplier = new AtomicLong(); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(timeInMillisSupplier::incrementAndGet); - var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP); + var reconciler = new DesiredBalanceReconciler( + createBuiltInClusterSettings(), + threadPool, + DesiredBalanceMetrics.NOOP, + EMPTY_NODE_ALLOCATION_STATS + ); final long initialDelayInMillis = TimeValue.timeValueMinutes(5).getMillis(); timeInMillisSupplier.addAndGet(randomLongBetween(initialDelayInMillis, 2 * initialDelayInMillis)); @@ -1348,10 +1354,8 @@ public void testShouldLogOnTooManyUndesiredAllocations() { private static void 
reconcile(RoutingAllocation routingAllocation, DesiredBalance desiredBalance) { final var threadPool = mock(ThreadPool.class); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(new AtomicLong()::incrementAndGet); - new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP).reconcile( - desiredBalance, - routingAllocation - ); + new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP, EMPTY_NODE_ALLOCATION_STATS) + .reconcile(desiredBalance, routingAllocation); } private static boolean isReconciled(RoutingNode node, DesiredBalance balance) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index 2cb3204787ce1..b18e2c0cd2647 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.cluster.service.FakeThreadPoolMasterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.TimeProviderUtils; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.core.TimeValue; @@ -168,7 +169,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo threadPool, clusterService, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); assertValidStats(desiredBalanceShardsAllocator.getStats()); var allocationService = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator(allocateUnassigned)); @@ -295,7 +297,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo threadPool, clusterService, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = new AllocationService( new AllocationDeciders(List.of()), @@ -396,7 +399,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, time::get, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, TimeProviderUtils.create(time::get), shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -413,7 +416,8 @@ boolean hasEnoughIterations(int currentIteration) { } }, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); allocationServiceRef.set(allocationService); @@ -522,7 +526,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -540,7 +544,8 @@ public DesiredBalance compute( } }, reconcileAction, - TelemetryProvider.NOOP 
+ TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); allocationServiceRef.set(allocationService); @@ -625,7 +630,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -643,7 +648,8 @@ public DesiredBalance compute( } }, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); @@ -712,7 +718,7 @@ public void testResetDesiredBalance() { var delegateAllocator = createShardsAllocator(); var clusterSettings = createBuiltInClusterSettings(); - var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator) { + var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator) { final AtomicReference lastComputationInput = new AtomicReference<>(); @@ -734,7 +740,8 @@ public DesiredBalance compute( clusterService, desiredBalanceComputer, (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); @@ -780,18 +787,15 @@ public void testResetDesiredBalanceOnNoLongerMaster() { var clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool); var delegateAllocator = createShardsAllocator(); - var desiredBalanceComputer = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - threadPool::relativeTimeInMillis, - delegateAllocator - ); + var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator); var desiredBalanceShardsAllocator = new DesiredBalanceShardsAllocator( delegateAllocator, threadPool, clusterService, desiredBalanceComputer, (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); @@ -833,18 +837,15 @@ public void testResetDesiredBalanceOnNodeShutdown() { final var resetCalled = new AtomicBoolean(); var delegateAllocator = createShardsAllocator(); - var desiredBalanceComputer = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - threadPool::relativeTimeInMillis, - delegateAllocator - ); + var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator); var desiredBalanceAllocator = new DesiredBalanceShardsAllocator( delegateAllocator, threadPool, clusterService, desiredBalanceComputer, (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { @Override public void resetDesiredBalance() { diff --git a/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java b/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java new file mode 100644 index 
0000000000000..964683a1972ba
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java
@@ -0,0 +1,270 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+import java.util.Base64;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.function.Supplier;
+import java.util.stream.IntStream;
+
+public class TimeBasedUUIDGeneratorTests extends ESTestCase {
+
+    public void testTimeBasedUUIDGeneration() {
+        assertUUIDFormat(createGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), 100_000);
+    }
+
+    public void testTimeBasedUUIDUniqueness() {
+        assertUUIDUniqueness(createGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), 100_000);
+    }
+
+    public void testTimeBasedUUIDSequenceOverflow() {
+        // The assumption here is that our system will not generate more than 1000 UUIDs within the same millisecond.
+        // The sequence ID is set close to its max value (0x00FF_FFFF) to quickly trigger an overflow.
+        // However, since we are generating only 1000 UUIDs, the timestamp is expected to change at least once,
+        // ensuring uniqueness even if the sequence ID wraps around.
+        assertEquals(
+            1000,
+            generateUUIDs(
+                createGenerator(() -> Instant.now().toEpochMilli(), () -> 0x00FF_FFFF - 10, new TestRandomMacAddressSupplier()),
+                1000
+            ).size()
+        );
+    }
+
+    public void testTimeBasedUUIDClockReset() {
+        // Simulate a clock that resets itself after reaching a threshold.
+        final Supplier<Long> unreliableClock = new TestClockResetTimestampSupplier(
+            Instant.now(),
+            1,
+            50,
+            ChronoUnit.MILLIS,
+            Instant.now().plus(100, ChronoUnit.MILLIS)
+        );
+        final UUIDGenerator generator = createGenerator(unreliableClock, () -> 0, new TestRandomMacAddressSupplier());
+
+        final Set<String> beforeReset = generateUUIDs(generator, 5_000);
+        final Set<String> afterReset = generateUUIDs(generator, 5_000);
+
+        // Ensure all UUIDs are unique, even after the clock resets.
+        assertEquals(5_000, beforeReset.size());
+        assertEquals(5_000, afterReset.size());
+        beforeReset.addAll(afterReset);
+        assertEquals(10_000, beforeReset.size());
+    }
+
+    public void testKOrderedUUIDGeneration() {
+        assertUUIDFormat(createKOrderedGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), 100_000);
+    }
+
+    public void testKOrderedUUIDUniqueness() {
+        assertUUIDUniqueness(
+            createKOrderedGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()),
+            100_000
+        );
+    }
+
+    public void testKOrderedUUIDSequenceOverflow() {
+        final UUIDGenerator generator = createKOrderedGenerator(
+            () -> Instant.now().toEpochMilli(),
+            () -> 0x00FF_FFFF - 10,
+            new TestRandomMacAddressSupplier()
+        );
+        final Set<String> uuids = generateUUIDs(generator, 1000);
+
+        // The assumption here is that our system will not generate more than 1000 UUIDs within the same millisecond.
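//
// Reviewer sketch (illustrative, not part of this patch): the sequence id is
// stored in three bytes (see TestTimeBasedKOrderedUUIDDecoder below), so,
// assuming it is masked to 24 bits, it wraps at 2^24:
//
//     int seq = 0x00FF_FFFF - 10;      // the starting value used here
//     seq = (seq + 11) & 0x00FF_FFFF;  // == 0, i.e. the counter has wrapped
//
// After a wrap, uniqueness rests on the millisecond timestamp having moved on,
// which is what the surrounding comment spells out.
//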
+        // The sequence ID is set close to its max value (0x00FF_FFFF) to quickly trigger an overflow.
+        // However, since we are generating only 1000 UUIDs, the timestamp is expected to change at least once,
+        // ensuring uniqueness even if the sequence ID wraps around.
+        assertEquals(1000, uuids.size());
+    }
+
+    public void testUUIDEncodingDecoding() {
+        testUUIDEncodingDecodingHelper(
+            Instant.parse("2024-11-13T10:12:43Z").toEpochMilli(),
+            12345,
+            new TestRandomMacAddressSupplier().get()
+        );
+    }
+
+    public void testUUIDEncodingDecodingWithRandomValues() {
+        testUUIDEncodingDecodingHelper(
+            randomInstantBetween(Instant.now().minus(1, ChronoUnit.DAYS), Instant.now()).toEpochMilli(),
+            randomIntBetween(0, 0x00FF_FFFF),
+            new TestRandomMacAddressSupplier().get()
+        );
+    }
+
+    private void testUUIDEncodingDecodingHelper(final long timestamp, final int sequenceId, final byte[] macAddress) {
+        final TestTimeBasedKOrderedUUIDDecoder decoder = new TestTimeBasedKOrderedUUIDDecoder(
+            createKOrderedGenerator(() -> timestamp, () -> sequenceId, () -> macAddress).getBase64UUID()
+        );
+
+        // The sequence ID is incremented by 1 when generating the UUID.
+        assertEquals("Sequence ID does not match", sequenceId + 1, decoder.decodeSequenceId());
+        // Truncate the timestamp to milliseconds to match the UUID generation granularity.
+        assertEquals(
+            "Timestamp does not match",
+            Instant.ofEpochMilli(timestamp).truncatedTo(ChronoUnit.MILLIS),
+            Instant.ofEpochMilli(decoder.decodeTimestamp()).truncatedTo(ChronoUnit.MILLIS)
+        );
+        assertArrayEquals("MAC address does not match", macAddress, decoder.decodeMacAddress());
+    }
+
+    private void assertUUIDUniqueness(final UUIDGenerator generator, final int count) {
+        assertEquals(count, generateUUIDs(generator, count).size());
+    }
+
+    private Set<String> generateUUIDs(final UUIDGenerator generator, final int count) {
+        return IntStream.range(0, count).mapToObj(i -> generator.getBase64UUID()).collect(HashSet::new, Set::add, Set::addAll);
+    }
+
+    private void assertUUIDFormat(final UUIDGenerator generator, final int count) {
+        IntStream.range(0, count).forEach(i -> {
+            final String uuid = generator.getBase64UUID();
+            assertNotNull(uuid);
+            assertEquals(20, uuid.length());
+            assertFalse(uuid.contains("+"));
+            assertFalse(uuid.contains("/"));
+            assertFalse(uuid.contains("="));
+        });
+    }
+
+    private UUIDGenerator createGenerator(
+        final Supplier<Long> timestampSupplier,
+        final Supplier<Integer> sequenceIdSupplier,
+        final Supplier<byte[]> macAddressSupplier
+    ) {
+        return new TimeBasedUUIDGenerator(timestampSupplier, sequenceIdSupplier, macAddressSupplier);
+    }
+
+    private UUIDGenerator createKOrderedGenerator(
+        final Supplier<Long> timestampSupplier,
+        final Supplier<Integer> sequenceIdSupplier,
+        final Supplier<byte[]> macAddressSupplier
+    ) {
+        return new TimeBasedKOrderedUUIDGenerator(timestampSupplier, sequenceIdSupplier, macAddressSupplier);
+    }
+
+    private static class TestRandomMacAddressSupplier implements Supplier<byte[]> {
+        private final byte[] macAddress = new byte[] { randomByte(), randomByte(), randomByte(), randomByte(), randomByte(), randomByte() };
+
+        @Override
+        public byte[] get() {
+            return macAddress;
+        }
+    }
+
+    /**
+     * A {@link Supplier} implementation that simulates a clock that can move forward or backward in time.
+     * This supplier provides timestamps in milliseconds since the epoch, adjusting based on a given delta
+     * until a reset threshold is reached. After crossing the threshold, the timestamp moves backwards by a reset delta.
+     */
+    private static class TestClockResetTimestampSupplier implements Supplier<Long> {
+        private Instant currentTime;
+        private final long delta;
+        private final long resetDelta;
+        private final ChronoUnit unit;
+        private final Instant resetThreshold;
+
+        /**
+         * Constructs a new {@link TestClockResetTimestampSupplier}.
+         *
+         * @param startTime The initial starting time.
+         * @param delta The amount of time to add to the current time in each forward step.
+         * @param resetDelta The amount of time to subtract once the reset threshold is reached.
+         * @param unit The unit of time for both delta and resetDelta.
+         * @param resetThreshold The threshold after which the time is reset backwards.
+         */
+        TestClockResetTimestampSupplier(
+            final Instant startTime,
+            final long delta,
+            final long resetDelta,
+            final ChronoUnit unit,
+            final Instant resetThreshold
+        ) {
+            this.currentTime = startTime;
+            this.delta = delta;
+            this.resetDelta = resetDelta;
+            this.unit = unit;
+            this.resetThreshold = resetThreshold;
+        }
+
+        /**
+         * Provides the next timestamp in milliseconds since the epoch.
+         * If the current time is before the reset threshold, it advances the time by the delta.
+         * Otherwise, it subtracts the reset delta.
+         *
+         * @return The current time in milliseconds since the epoch.
+         */
+        @Override
+        public Long get() {
+            if (currentTime.isBefore(resetThreshold)) {
+                currentTime = currentTime.plus(delta, unit);
+            } else {
+                currentTime = currentTime.minus(resetDelta, unit);
+            }
+            return currentTime.toEpochMilli();
+        }
+    }
+
+    /**
+     * A utility class to decode the K-ordered UUID extracting the original timestamp, MAC address and sequence ID.
+     */
+    private static class TestTimeBasedKOrderedUUIDDecoder {
+
+        private final byte[] decodedBytes;
+
+        /**
+         * Constructs a new {@link TestTimeBasedKOrderedUUIDDecoder} using a base64-encoded UUID string.
+         *
+         * @param base64UUID The base64-encoded UUID string to decode.
+         */
+        TestTimeBasedKOrderedUUIDDecoder(final String base64UUID) {
+            this.decodedBytes = Base64.getUrlDecoder().decode(base64UUID);
+        }
+
+        /**
+         * Decodes the timestamp from the UUID using the following bytes:
+         * 0 (most significant), 1, 2, 3, 11, 13 (least significant).
+         *
+         * @return The decoded timestamp in milliseconds.
+         */
+        public long decodeTimestamp() {
+            return ((long) (decodedBytes[0] & 0xFF) << 40) | ((long) (decodedBytes[1] & 0xFF) << 32) | ((long) (decodedBytes[2] & 0xFF)
+                << 24) | ((long) (decodedBytes[3] & 0xFF) << 16) | ((long) (decodedBytes[11] & 0xFF) << 8) | (decodedBytes[13] & 0xFF);
+        }
+
+        /**
+         * Decodes the MAC address from the UUID using bytes 4 to 9.
+         *
+         * @return The decoded MAC address as a byte array.
+         */
+        public byte[] decodeMacAddress() {
+            byte[] macAddress = new byte[6];
+            System.arraycopy(decodedBytes, 4, macAddress, 0, 6);
+            return macAddress;
+        }
+
+        /**
+         * Decodes the sequence ID from the UUID using bytes:
+         * 10 (most significant), 12 (middle), 14 (least significant).
+         *
+         * @return The decoded sequence ID.
+ */ + public int decodeSequenceId() { + return ((decodedBytes[10] & 0xFF) << 16) | ((decodedBytes[12] & 0xFF) << 8) | (decodedBytes[14] & 0xFF); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/common/UUIDTests.java b/server/src/test/java/org/elasticsearch/common/UUIDTests.java index 2e7dbb00aa2de..71c705f5df511 100644 --- a/server/src/test/java/org/elasticsearch/common/UUIDTests.java +++ b/server/src/test/java/org/elasticsearch/common/UUIDTests.java @@ -27,31 +27,78 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; +import java.util.Base64; import java.util.HashSet; import java.util.Random; import java.util.Set; +import java.util.function.Supplier; public class UUIDTests extends ESTestCase { - static UUIDGenerator timeUUIDGen = new TimeBasedUUIDGenerator(); + static final Base64.Decoder BASE_64_URL_DECODER = Base64.getUrlDecoder(); + static UUIDGenerator timeUUIDGen = new TimeBasedUUIDGenerator( + UUIDs.DEFAULT_TIMESTAMP_SUPPLIER, + UUIDs.DEFAULT_SEQUENCE_ID_SUPPLIER, + UUIDs.DEFAULT_MAC_ADDRESS_SUPPLIER + ); static UUIDGenerator randomUUIDGen = new RandomBasedUUIDGenerator(); + static UUIDGenerator kOrderedUUIDGen = new TimeBasedKOrderedUUIDGenerator( + UUIDs.DEFAULT_TIMESTAMP_SUPPLIER, + UUIDs.DEFAULT_SEQUENCE_ID_SUPPLIER, + UUIDs.DEFAULT_MAC_ADDRESS_SUPPLIER + ); public void testRandomUUID() { - verifyUUIDSet(100000, randomUUIDGen); + verifyUUIDSet(100000, randomUUIDGen).forEach(this::verifyUUIDIsUrlSafe); } public void testTimeUUID() { - verifyUUIDSet(100000, timeUUIDGen); + verifyUUIDSet(100000, timeUUIDGen).forEach(this::verifyUUIDIsUrlSafe); } - public void testThreadedTimeUUID() { - testUUIDThreaded(timeUUIDGen); + public void testKOrderedUUID() { + verifyUUIDSet(100000, kOrderedUUIDGen).forEach(this::verifyUUIDIsUrlSafe); } public void testThreadedRandomUUID() { testUUIDThreaded(randomUUIDGen); } + public void testThreadedTimeUUID() { + testUUIDThreaded(timeUUIDGen); + } + + public void testThreadedKOrderedUUID() { + testUUIDThreaded(kOrderedUUIDGen); + } + + public void testCompression() throws Exception { + Logger logger = LogManager.getLogger(UUIDTests.class); + + assertThat(testCompression(timeUUIDGen, 100000, 10000, 3, logger), Matchers.lessThan(14d)); + assertThat(testCompression(timeUUIDGen, 100000, 1000, 3, logger), Matchers.lessThan(15d)); + assertThat(testCompression(timeUUIDGen, 100000, 100, 3, logger), Matchers.lessThan(21d)); + + assertThat(testCompression(kOrderedUUIDGen, 100000, 10000, 3, logger), Matchers.lessThan(13d)); + assertThat(testCompression(kOrderedUUIDGen, 100000, 1000, 3, logger), Matchers.lessThan(14d)); + assertThat(testCompression(kOrderedUUIDGen, 100000, 100, 3, logger), Matchers.lessThan(19d)); + } + + public void testComparativeCompression() throws Exception { + Logger logger = LogManager.getLogger(UUIDTests.class); + + int numDocs = 100000; + int docsPerSecond = 1000; + int nodes = 3; + + double randomCompression = testCompression(randomUUIDGen, numDocs, docsPerSecond, nodes, logger); + double baseCompression = testCompression(timeUUIDGen, numDocs, docsPerSecond, nodes, logger); + double kOrderedCompression = testCompression(kOrderedUUIDGen, numDocs, docsPerSecond, nodes, logger); + + assertThat(kOrderedCompression, Matchers.lessThanOrEqualTo(baseCompression)); + assertThat(kOrderedCompression, Matchers.lessThanOrEqualTo(randomCompression)); + } + Set verifyUUIDSet(int count, UUIDGenerator uuidSource) { HashSet uuidSet = new HashSet<>(); for (int i = 0; i < count; ++i) { @@ -107,51 +154,55 @@ public void 
testUUIDThreaded(UUIDGenerator uuidSource) { globalSet.addAll(runner.uuidSet); } assertEquals(count * uuids, globalSet.size()); + globalSet.forEach(this::verifyUUIDIsUrlSafe); } - public void testCompression() throws Exception { - Logger logger = LogManager.getLogger(UUIDTests.class); - // Low number so that the test runs quickly, but the results are more interesting with larger numbers - // of indexed documents - assertThat(testCompression(100000, 10000, 3, logger), Matchers.lessThan(14d)); // ~12 in practice - assertThat(testCompression(100000, 1000, 3, logger), Matchers.lessThan(15d)); // ~13 in practice - assertThat(testCompression(100000, 100, 3, logger), Matchers.lessThan(21d)); // ~20 in practice - } - - private static double testCompression(int numDocs, int numDocsPerSecond, int numNodes, Logger logger) throws Exception { - final double intervalBetweenDocs = 1000. / numDocsPerSecond; // milliseconds + private static double testCompression(final UUIDGenerator generator, int numDocs, int numDocsPerSecond, int numNodes, Logger logger) + throws Exception { + final double intervalBetweenDocs = 1000. / numDocsPerSecond; final byte[][] macAddresses = new byte[numNodes][]; Random r = random(); for (int i = 0; i < macAddresses.length; ++i) { macAddresses[i] = new byte[6]; random().nextBytes(macAddresses[i]); } - UUIDGenerator generator = new TimeBasedUUIDGenerator() { - double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); - @Override - protected long currentTimeMillis() { - currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); - return (long) currentTimeMillis; + UUIDGenerator uuidSource = generator; + if (generator instanceof TimeBasedUUIDGenerator) { + if (generator instanceof TimeBasedKOrderedUUIDGenerator) { + uuidSource = new TimeBasedKOrderedUUIDGenerator(new Supplier<>() { + double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); + + @Override + public Long get() { + currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); + return (long) currentTimeMillis; + } + }, () -> 0, () -> RandomPicks.randomFrom(r, macAddresses)); + } else { + uuidSource = new TimeBasedUUIDGenerator(new Supplier<>() { + double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); + + @Override + public Long get() { + currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); + return (long) currentTimeMillis; + } + }, () -> 0, () -> RandomPicks.randomFrom(r, macAddresses)); } + } - @Override - protected byte[] macAddress() { - return RandomPicks.randomFrom(r, macAddresses); - } - }; - // Avoid randomization which will slow down things without improving - // the quality of this test Directory dir = newFSDirectory(createTempDir()); IndexWriterConfig config = new IndexWriterConfig().setCodec(Codec.forName(Lucene.LATEST_CODEC)) - .setMergeScheduler(new SerialMergeScheduler()); // for reproducibility + .setMergeScheduler(new SerialMergeScheduler()); + IndexWriter w = new IndexWriter(dir, config); Document doc = new Document(); StringField id = new StringField("_id", "", Store.NO); doc.add(id); long start = System.nanoTime(); for (int i = 0; i < numDocs; ++i) { - id.setStringValue(generator.getBase64UUID()); + id.setStringValue(uuidSource.getBase64UUID()); w.addDocument(doc); } w.forceMerge(1); @@ -164,33 +215,37 @@ protected byte[] macAddress() { dir.close(); double bytesPerDoc = (double) size / numDocs; logger.info( - numDocs - + " docs indexed at " - + numDocsPerSecond - + " docs/s required " - + ByteSizeValue.ofBytes(size) - + " bytes of 
disk space, or " - + bytesPerDoc - + " bytes per document. Took: " - + new TimeValue(time) - + "." + "{} - {} docs indexed at {} docs/s required {} bytes of disk space, or {} bytes per document. Took: {}.", + uuidSource.getClass().getSimpleName(), + numDocs, + numDocsPerSecond, + ByteSizeValue.ofBytes(size), + bytesPerDoc, + new TimeValue(time) ); return bytesPerDoc; } public void testStringLength() { assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, getUnpaddedBase64StringLength(RandomBasedUUIDGenerator.SIZE_IN_BYTES)); - assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, UUIDs.randomBase64UUID().length()); - assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, UUIDs.randomBase64UUID(random()).length()); - try (var secureString = UUIDs.randomBase64UUIDSecureString()) { - assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, secureString.toString().length()); - } - assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, getUnpaddedBase64StringLength(TimeBasedUUIDGenerator.SIZE_IN_BYTES)); - assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, UUIDs.base64UUID().length()); + assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, getUnpaddedBase64StringLength(TimeBasedKOrderedUUIDGenerator.SIZE_IN_BYTES)); + + assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, randomUUIDGen.getBase64UUID().length()); + assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, timeUUIDGen.getBase64UUID().length()); + assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, kOrderedUUIDGen.getBase64UUID().length()); } private static int getUnpaddedBase64StringLength(int sizeInBytes) { return (int) Math.ceil(sizeInBytes * 4.0 / 3.0); } + + private void verifyUUIDIsUrlSafe(final String uuid) { + assertFalse("UUID should not contain padding characters: " + uuid, uuid.contains("=")); + try { + BASE_64_URL_DECODER.decode(uuid); + } catch (IllegalArgumentException e) { + throw new AssertionError("UUID is not a valid Base64 URL-safe encoded string: " + uuid); + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index b197fc3d5dc25..b9755ba250f47 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -1244,16 +1244,16 @@ public void testStrictParsing() { assertParseException("2018-12-31T12:12:12", "strict_date_hour_minute_second_millis", 19); assertParseException("2018-12-31T12:12:12", "strict_date_hour_minute_second_fraction", 19); assertParses("2018-12-31", "strict_date_optional_time"); - assertParseException("2018-12-1", "strict_date_optional_time", 7); - assertParseException("2018-1-31", "strict_date_optional_time", 4); + assertParseException("2018-12-1", "strict_date_optional_time", 8); + assertParseException("2018-1-31", "strict_date_optional_time", 5); assertParseException("10000-01-31", "strict_date_optional_time", 4); assertParses("2010-01-05T02:00", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30Z", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30+0100", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30+01:00", "strict_date_optional_time"); - assertParseException("2018-12-31T10:15:3", "strict_date_optional_time", 16); - assertParseException("2018-12-31T10:5:30", "strict_date_optional_time", 13); + assertParseException("2018-12-31T10:15:3", "strict_date_optional_time", 17); + 
assertParseException("2018-12-31T10:5:30", "strict_date_optional_time", 14); assertParseException("2018-12-31T9:15:30", "strict_date_optional_time", 11); assertParses("2015-01-04T00:00Z", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30.1Z", "strict_date_time"); diff --git a/server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java b/server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java new file mode 100644 index 0000000000000..a3c5c105eb34a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common.time; + +import org.elasticsearch.core.TimeValue; + +import java.util.function.LongSupplier; + +public class TimeProviderUtils { + + /** + * Creates a TimeProvider implementation for tests that uses the same source for + * all methods (regardless of relative or absolute time). + */ + public static TimeProvider create(LongSupplier timeSourceInMillis) { + return new TimeProvider() { + @Override + public long relativeTimeInMillis() { + return timeSourceInMillis.getAsLong(); + } + + @Override + public long relativeTimeInNanos() { + return timeSourceInMillis.getAsLong() * TimeValue.NSEC_PER_MSEC; + } + + @Override + public long rawRelativeTimeInMillis() { + return timeSourceInMillis.getAsLong(); + } + + @Override + public long absoluteTimeInMillis() { + return timeSourceInMillis.getAsLong(); + } + }; + } +} diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 7628ee8c954b4..a161794e35b91 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -31,8 +31,10 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.function.UnaryOperator; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -47,7 +49,7 @@ public void testUpdateTemplateMetadataOnUpgrade() { IndexTemplateMetadata.builder("added_test_template").patterns(randomIndexPatterns()).build() ); return templates; - })); + }), List.of()); Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); assertNotSame(upgrade, metadata); @@ -57,7 +59,7 @@ public void testUpdateTemplateMetadataOnUpgrade() { public void testNoMetadataUpgrade() { Metadata metadata = randomMetadata(new CustomMetadata1("data")); - MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList()); + MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList(), List.of()); Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); assertSame(upgrade, metadata); assertTrue(Metadata.isGlobalStateEquals(upgrade, metadata)); @@ -68,7 +70,7 @@ 
public void testNoMetadataUpgrade() {
 
     public void testCustomMetadataValidation() {
         Metadata metadata = randomMetadata(new CustomMetadata1("data"));
-        MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList());
+        MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList(), List.of());
         try {
             GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader);
         } catch (IllegalStateException e) {
@@ -78,7 +80,7 @@ public void testCustomMetadataValidation() {
 
     public void testIndexMetadataUpgrade() {
         Metadata metadata = randomMetadata();
-        MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList());
+        MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList(), List.of());
         Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(true), metadataUpgrader);
         assertNotSame(upgrade, metadata);
         assertTrue(Metadata.isGlobalStateEquals(upgrade, metadata));
@@ -89,7 +91,7 @@ public void testIndexMetadataUpgrade() {
 
     public void testCustomMetadataNoChange() {
         Metadata metadata = randomMetadata(new CustomMetadata1("data"));
-        MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.singletonList(HashMap::new));
+        MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.singletonList(HashMap::new), List.of());
         Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader);
         assertSame(upgrade, metadata);
         assertTrue(Metadata.isGlobalStateEquals(upgrade, metadata));
@@ -98,11 +100,74 @@ public void testCustomMetadataNoChange() {
         }
     }
 
+    public void testCustomMetadata_appliesUpgraders() {
+        CustomMetadata2 custom2 = new CustomMetadata2("some data");
+        // Test with a CustomMetadata1 and a CustomMetadata2...
+        Metadata originalMetadata = Metadata.builder()
+            .putCustom(CustomMetadata1.TYPE, new CustomMetadata1("data"))
+            .putCustom(CustomMetadata2.TYPE, custom2)
+            .build();
+        // ...and two sets of upgraders which affect CustomMetadata1 and some other types...
+        Map<String, UnaryOperator<Metadata.Custom>> customUpgraders = Map.of(
+            CustomMetadata1.TYPE,
+            toUpgrade -> new CustomMetadata1("new " + ((CustomMetadata1) toUpgrade).getData()),
+            "not_" + CustomMetadata1.TYPE,
+            toUpgrade -> {
+                fail("This upgrader should not be invoked");
+                return toUpgrade;
+            }
+        );
+        Map<String, UnaryOperator<Metadata.Custom>> moreCustomUpgraders = Map.of("also_not_" + CustomMetadata1.TYPE, toUpgrade -> {
+            fail("This upgrader should not be invoked");
+            return toUpgrade;
+        });
+        MetadataUpgrader metadataUpgrader = new MetadataUpgrader(List.of(HashMap::new), List.of(customUpgraders, moreCustomUpgraders));
+        Metadata upgradedMetadata = GatewayMetaState.upgradeMetadata(
+            originalMetadata,
+            new MockIndexMetadataVerifier(false),
+            metadataUpgrader
+        );
+        // ...and assert that the CustomMetadata1 has been upgraded...
+        assertEquals(new CustomMetadata1("new data"), upgradedMetadata.custom(CustomMetadata1.TYPE));
+        // ...but the CustomMetadata2 is untouched.
+        assertSame(custom2, upgradedMetadata.custom(CustomMetadata2.TYPE));
+    }
+
+    public void testCustomMetadata_appliesMultipleUpgraders() {
+        // Test with a CustomMetadata1 and a CustomMetadata2...
+        Metadata originalMetadata = Metadata.builder()
+            .putCustom(CustomMetadata1.TYPE, new CustomMetadata1("data"))
+            .putCustom(CustomMetadata2.TYPE, new CustomMetadata2("other data"))
+            .build();
+        // ...and a set of upgraders which affects both of those...
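//
// Reviewer sketch (illustrative, not part of this patch): the shape of the new
// second MetadataUpgrader argument, as exercised by these tests: one map per
// provider, keyed by the type of the custom metadata it rewrites.
//
//     Map<String, UnaryOperator<Metadata.Custom>> upgraders = Map.of(
//         CustomMetadata1.TYPE,
//         custom -> new CustomMetadata1("new " + ((CustomMetadata1) custom).getData())
//     );
//     new MetadataUpgrader(List.of(), List.of(upgraders));
//
// Keys with no matching custom in the cluster metadata are never invoked,
// which is what the "not_..." entries above assert.
//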
+        Map<String, UnaryOperator<Metadata.Custom>> customUpgraders = Map.of(
+            CustomMetadata1.TYPE,
+            toUpgrade -> new CustomMetadata1("new " + ((CustomMetadata1) toUpgrade).getData()),
+            CustomMetadata2.TYPE,
+            toUpgrade -> new CustomMetadata2("new " + ((CustomMetadata2) toUpgrade).getData())
+        );
+        // ...and another set of upgraders which applies a second upgrade to CustomMetadata2...
+        Map<String, UnaryOperator<Metadata.Custom>> moreCustomUpgraders = Map.of(
+            CustomMetadata2.TYPE,
+            toUpgrade -> new CustomMetadata2("more " + ((CustomMetadata2) toUpgrade).getData())
+        );
+        MetadataUpgrader metadataUpgrader = new MetadataUpgrader(List.of(HashMap::new), List.of(customUpgraders, moreCustomUpgraders));
+        Metadata upgradedMetadata = GatewayMetaState.upgradeMetadata(
+            originalMetadata,
+            new MockIndexMetadataVerifier(false),
+            metadataUpgrader
+        );
+        // ...and assert that the first upgrader has been applied to the CustomMetadata1...
+        assertEquals(new CustomMetadata1("new data"), upgradedMetadata.custom(CustomMetadata1.TYPE));
+        // ...and both upgraders have been applied to the CustomMetadata2.
+        assertEquals(new CustomMetadata2("more new other data"), upgradedMetadata.custom(CustomMetadata2.TYPE));
+    }
+
     public void testIndexTemplateValidation() {
         Metadata metadata = randomMetadata();
         MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.singletonList(customs -> {
             throw new IllegalStateException("template is incompatible");
-        }));
+        }), List.of());
         String message = expectThrows(
             IllegalStateException.class,
             () -> GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader)
@@ -136,8 +201,7 @@ public void testMultipleIndexTemplateUpgrade() {
                 .build()
             );
             return indexTemplateMetadatas;
-
-        }));
+        }), List.of());
         Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader);
         assertNotSame(upgrade, metadata);
         assertFalse(Metadata.isGlobalStateEquals(upgrade, metadata));
@@ -228,6 +292,29 @@ public EnumSet<Metadata.XContentContext> context() {
         }
     }
 
+    private static class CustomMetadata2 extends TestCustomMetadata {
+        public static final String TYPE = "custom_md_2";
+
+        CustomMetadata2(String data) {
+            super(data);
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+
+        @Override
+        public TransportVersion getMinimalSupportedVersion() {
+            return TransportVersion.current();
+        }
+
+        @Override
+        public EnumSet<Metadata.XContentContext> context() {
+            return EnumSet.of(Metadata.XContentContext.GATEWAY);
+        }
+    }
+
     private static Metadata randomMetadata(TestCustomMetadata...
customMetadatas) { Metadata.Builder builder = Metadata.builder(); for (TestCustomMetadata customMetadata : customMetadatas) { diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index cf623e77f740a..19d92568e6528 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -271,7 +271,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th final RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); final RestControllerTests.AssertingChannel channel = new RestControllerTests.AssertingChannel( fakeRequest, - false, + true, RestStatus.BAD_REQUEST ); @@ -361,7 +361,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th Map> restHeaders = new HashMap<>(); restHeaders.put(Task.TRACE_PARENT_HTTP_HEADER, Collections.singletonList(traceParentValue)); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); - RestControllerTests.AssertingChannel channel = new RestControllerTests.AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + RestControllerTests.AssertingChannel channel = new RestControllerTests.AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); try ( AbstractHttpServerTransport transport = new AbstractHttpServerTransport( diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 909005d228665..49a4d519c0ea4 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -111,6 +111,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; @@ -237,7 +238,8 @@ public void testWrapperIsBound() throws IOException { indexNameExpressionResolver, Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); module.setReaderWrapper(s -> new Wrapper()); @@ -264,7 +266,8 @@ public void testRegisterIndexStore() throws IOException { indexNameExpressionResolver, Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); final IndexService indexService = newIndexService(module); @@ -289,7 +292,8 @@ public void testDirectoryWrapper() throws IOException { indexNameExpressionResolver, Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); module.setDirectoryWrapper(new TestDirectoryWrapper()); @@ -642,7 +646,8 @@ public void testRegisterCustomRecoveryStateFactory() throws IOException { indexNameExpressionResolver, recoveryStateFactories, mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); final IndexService indexService = newIndexService(module); @@ -664,7 +669,8 @@ public void testIndexCommitListenerIsBound() throws IOException, ExecutionExcept indexNameExpressionResolver, Collections.emptyMap(), 
mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); final AtomicLong lastAcquiredPrimaryTerm = new AtomicLong(); @@ -766,7 +772,8 @@ private static IndexModule createIndexModule( indexNameExpressionResolver, Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); } diff --git a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java index d7d5c886e0741..77ab665166926 100644 --- a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java +++ b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java @@ -94,17 +94,7 @@ public void testInvalidateEntries() throws Exception { DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0)); - BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }); + BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, BitsetFilterCache.Listener.NOOP); BitSetProducer filter = cache.getBitSetProducer(new TermQuery(new Term("field", "value"))); assertThat(matchCount(filter, reader), equalTo(3)); @@ -237,17 +227,7 @@ public void testSetNullListener() { } public void testRejectOtherIndex() throws IOException { - BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }); + BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, BitsetFilterCache.Listener.NOOP); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); diff --git a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java index 9e4a19eb039fd..6b1ffc3693636 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -19,7 +19,6 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; @@ -30,7 +29,6 @@ import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.script.ScriptCompiler; @@ -132,13 +130,7 @@ private CodecService createCodecService() throws IOException { Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER ); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void 
onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, BitsetFilterCache.Listener.NOOP); MapperService service = new MapperService( () -> TransportVersion.current(), settings, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java index b07ec8e7cb683..083efccceec16 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java @@ -27,11 +27,7 @@ import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -72,8 +68,6 @@ public void testSynthesizeIdSimple() throws Exception { } public void testSynthesizeIdMultipleSegments() throws Exception { - var routingPaths = List.of("dim1"); - var routing = createRouting(routingPaths); var idLoader = IdLoader.createTsIdLoader(null, null); long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); @@ -144,8 +138,6 @@ public void testSynthesizeIdMultipleSegments() throws Exception { } public void testSynthesizeIdRandom() throws Exception { - var routingPaths = List.of("dim1"); - var routing = createRouting(routingPaths); var idLoader = IdLoader.createTsIdLoader(null, null); long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); @@ -153,7 +145,6 @@ public void testSynthesizeIdRandom() throws Exception { List randomDocs = new ArrayList<>(); int numberOfTimeSeries = randomIntBetween(8, 64); for (int i = 0; i < numberOfTimeSeries; i++) { - long routingId = 0; int numberOfDimensions = randomIntBetween(1, 6); List dimensions = new ArrayList<>(numberOfDimensions); for (int j = 1; j <= numberOfDimensions; j++) { @@ -165,7 +156,6 @@ public void testSynthesizeIdRandom() throws Exception { value = randomAlphaOfLength(4); } dimensions.add(new Dimension(fieldName, value)); - routingId = value.hashCode(); } int numberOfSamples = randomIntBetween(1, 16); for (int j = 0; j < numberOfSamples; j++) { @@ -225,21 +215,21 @@ private void prepareIndexReader( } private static void indexDoc(IndexWriter iw, Doc doc, int routingHash) throws IOException { - final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingFields = new RoutingPathFields(null); final List fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, doc.timestamp)); fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, doc.timestamp)); for (Dimension dimension : doc.dimensions) { if (dimension.value instanceof Number n) { - builder.addLong(dimension.field, n.longValue()); + routingFields.addLong(dimension.field, n.longValue()); fields.add(new SortedNumericDocValuesField(dimension.field, ((Number) dimension.value).longValue())); } else { - builder.addString(dimension.field, dimension.value.toString()); + routingFields.addString(dimension.field, dimension.value.toString()); fields.add(new 
SortedSetDocValuesField(dimension.field, new BytesRef(dimension.value.toString()))); } } - BytesRef tsid = builder.buildTsidHash().toBytesRef(); + BytesRef tsid = routingFields.buildHash().toBytesRef(); fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, tsid)); fields.add( new SortedDocValuesField( @@ -251,25 +241,15 @@ private static void indexDoc(IndexWriter iw, Doc doc, int routingHash) throws IO } private static String expectedId(Doc doc, int routingHash) throws IOException { - var timeSeriesIdBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingFields = new RoutingPathFields(null); for (Dimension dimension : doc.dimensions) { if (dimension.value instanceof Number n) { - timeSeriesIdBuilder.addLong(dimension.field, n.longValue()); + routingFields.addLong(dimension.field, n.longValue()); } else { - timeSeriesIdBuilder.addString(dimension.field, dimension.value.toString()); + routingFields.addString(dimension.field, dimension.value.toString()); } } - return TsidExtractingIdFieldMapper.createId(routingHash, timeSeriesIdBuilder.buildTsidHash().toBytesRef(), doc.timestamp); - } - - private static IndexRouting.ExtractFromSource createRouting(List routingPaths) { - var settings = indexSettings(IndexVersion.current(), 2, 1).put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2000-01-01T00:00:00.000Z") - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2001-01-01T00:00:00.000Z") - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), routingPaths) - .build(); - var indexMetadata = IndexMetadata.builder("index").settings(settings).build(); - return (IndexRouting.ExtractFromSource) IndexRouting.fromIndexMetadata(indexMetadata); + return TsidExtractingIdFieldMapper.createId(routingHash, routingFields.buildHash().toBytesRef(), doc.timestamp); } record Doc(long timestamp, List dimensions) {} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 0bf4c36d70a90..e0f58b8922be2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; @@ -20,7 +19,6 @@ import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.script.ScriptService; @@ -47,13 +45,7 @@ private static MappingParser createMappingParser(Settings settings, IndexVersion IndexAnalyzers indexAnalyzers = createIndexAnalyzers(); SimilarityService similarityService = new SimilarityService(indexSettings, scriptService, Collections.emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + 
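// BitsetFilterCache.Listener.NOOP stands in for the anonymous listeners removed in
// the hunks above; a sketch of the equivalence, assuming NOOP is simply an empty
// implementation of the same two-method interface shown in the removed code:
BitsetFilterCache.Listener noop = new BitsetFilterCache.Listener() {
    @Override
    public void onCache(ShardId shardId, Accountable accountable) {}

    @Override
    public void onRemoval(ShardId shardId, Accountable accountable) {}
};
// Call sites that ignore cache events can pass BitsetFilterCache.Listener.NOOP directly.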
BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); Supplier mappingParserContextSupplier = () -> new MappingParserContext( similarityService::getSimilarity, type -> mapperRegistry.getMapperParser(type, indexSettings.getIndexVersionCreated()), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java new file mode 100644 index 0000000000000..2c2c0d160c904 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.IndexRouting; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; + +public class RoutingPathFieldsTests extends ESTestCase { + + public void testWithBuilder() throws Exception { + IndexSettings settings = new IndexSettings( + IndexMetadata.builder("test") + .settings( + indexSettings(IndexVersion.current(), 1, 1).put( + Settings.builder().put("index.mode", "time_series").put("index.routing_path", "path.*").build() + ) + ) + .build(), + Settings.EMPTY + ); + IndexRouting.ExtractFromSource routing = (IndexRouting.ExtractFromSource) settings.getIndexRouting(); + + var routingPathFields = new RoutingPathFields(routing.builder()); + BytesReference current, previous; + + routingPathFields.addString("path.string_name", randomAlphaOfLengthBetween(1, 10)); + current = previous = routingPathFields.buildHash(); + assertNotNull(current); + + routingPathFields.addBoolean("path.boolean_name", randomBoolean()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addLong("path.long_name", randomLong()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addIp("path.ip_name", randomIp(randomBoolean())); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addUnsignedLong("path.unsigned_long_name", randomLongBetween(0, Long.MAX_VALUE)); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + assertArrayEquals(current.array(), routingPathFields.buildHash().array()); + } + + public void testWithoutBuilder() throws Exception { + var routingPathFields = new RoutingPathFields(null); + BytesReference current, previous; + + routingPathFields.addString("path.string_name", randomAlphaOfLengthBetween(1, 10)); + current = previous = routingPathFields.buildHash(); + assertNotNull(current); + + routingPathFields.addBoolean("path.boolean_name", randomBoolean()); + current = 
routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addLong("path.long_name", randomLong()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addIp("path.ip_name", randomIp(randomBoolean())); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addUnsignedLong("path.unsigned_long_name", randomLongBetween(0, Long.MAX_VALUE)); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + assertArrayEquals(current.array(), routingPathFields.buildHash().array()); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValuesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValuesTests.java new file mode 100644 index 0000000000000..ef316c5addefa --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValuesTests.java @@ -0,0 +1,374 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.mapper.vectors; + +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType; +import org.elasticsearch.script.field.vectors.ByteMultiDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.FloatMultiDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.MultiDenseVector; +import org.elasticsearch.script.field.vectors.MultiDenseVectorDocValuesField; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.index.IndexVersionUtils; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Iterator; +import java.util.List; + +import static org.hamcrest.Matchers.containsString; + +public class MultiDenseVectorScriptDocValuesTests extends ESTestCase { + + public void testFloatGetVectorValueAndGetMagnitude() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] expectedMagnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + + for (IndexVersion indexVersion : List.of(IndexVersionUtils.randomCompatibleVersion(random()), IndexVersion.current())) { + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, indexVersion); + BinaryDocValues magnitudeValues = wrap(expectedMagnitudes); + MultiDenseVectorDocValuesField field = new FloatMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.FLOAT, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + for (int i = 0; i < vectors.length; i++) { + field.setNextDocId(i); + assertEquals(vectors[i].length, field.size()); + assertEquals(dims, scriptDocValues.dims()); + Iterator iterator = 
scriptDocValues.getVectorValues(); + float[] magnitudes = scriptDocValues.getMagnitudes(); + assertEquals(expectedMagnitudes[i].length, magnitudes.length); + for (int j = 0; j < vectors[i].length; j++) { + assertTrue(iterator.hasNext()); + assertArrayEquals(vectors[i][j], iterator.next(), 0.0001f); + assertEquals(expectedMagnitudes[i][j], magnitudes[j], 0.0001f); + } + } + } + } + + public void testByteGetVectorValueAndGetMagnitude() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] expectedMagnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE, IndexVersion.current()); + BinaryDocValues magnitudeValues = wrap(expectedMagnitudes); + MultiDenseVectorDocValuesField field = new ByteMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.BYTE, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + for (int i = 0; i < vectors.length; i++) { + field.setNextDocId(i); + assertEquals(vectors[i].length, field.size()); + assertEquals(dims, scriptDocValues.dims()); + Iterator iterator = scriptDocValues.getVectorValues(); + float[] magnitudes = scriptDocValues.getMagnitudes(); + assertEquals(expectedMagnitudes[i].length, magnitudes.length); + for (int j = 0; j < vectors[i].length; j++) { + assertTrue(iterator.hasNext()); + assertArrayEquals(vectors[i][j], iterator.next(), 0.0001f); + assertEquals(expectedMagnitudes[i][j], magnitudes[j], 0.0001f); + } + } + } + + public void testFloatMetadataAndIterator() throws IOException { + int dims = 3; + IndexVersion indexVersion = IndexVersion.current(); + float[][][] vectors = new float[][][] { fill(new float[3][dims], ElementType.FLOAT), fill(new float[2][dims], ElementType.FLOAT) }; + float[][] magnitudes = new float[][] { new float[3], new float[2] }; + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, indexVersion); + BinaryDocValues magnitudeValues = wrap(magnitudes); + + MultiDenseVectorDocValuesField field = new FloatMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.FLOAT, + dims + ); + for (int i = 0; i < vectors.length; i++) { + field.setNextDocId(i); + MultiDenseVector dv = field.get(); + assertEquals(vectors[i].length, dv.size()); + assertFalse(dv.isEmpty()); + assertEquals(dims, dv.getDims()); + UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, field::iterator); + assertEquals("Cannot iterate over single valued multi_dense_vector field, use get() instead", e.getMessage()); + } + field.setNextDocId(vectors.length); + MultiDenseVector dv = field.get(); + assertEquals(dv, MultiDenseVector.EMPTY); + } + + public void testByteMetadataAndIterator() throws IOException { + int dims = 3; + IndexVersion indexVersion = IndexVersion.current(); + float[][][] vectors = new float[][][] { fill(new float[3][dims], ElementType.BYTE), fill(new float[2][dims], ElementType.BYTE) }; + float[][] magnitudes = new float[][] { new float[3], new float[2] }; + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE, indexVersion); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new ByteMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.BYTE, + dims + ); + for (int i = 0; i < vectors.length; i++) { + field.setNextDocId(i); + MultiDenseVector dv = field.get(); + 
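// The script-side read pattern these tests pin down, as a sketch reusing the names
// from this file (docId is any document that has a value): position the field,
// then walk the vectors and their precomputed magnitudes in parallel.
field.setNextDocId(docId);
MultiDenseVectorScriptDocValues values = field.toScriptDocValues();
Iterator<float[]> vectorValues = values.getVectorValues();
float[] magnitudes = values.getMagnitudes();
for (int j = 0; vectorValues.hasNext(); j++) {
    float[] vector = vectorValues.next(); // magnitudes[j] corresponds to this vector
}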
assertEquals(vectors[i].length, dv.size()); + assertFalse(dv.isEmpty()); + assertEquals(dims, dv.getDims()); + UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, field::iterator); + assertEquals("Cannot iterate over single valued multi_dense_vector field, use get() instead", e.getMessage()); + } + field.setNextDocId(vectors.length); + MultiDenseVector dv = field.get(); + assertEquals(dv, MultiDenseVector.EMPTY); + } + + protected float[][] fill(float[][] vectors, ElementType elementType) { + for (float[] vector : vectors) { + for (int i = 0; i < vector.length; i++) { + vector[i] = elementType == ElementType.FLOAT ? randomFloat() : randomByte(); + } + } + return vectors; + } + + public void testFloatMissingValues() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] magnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, IndexVersion.current()); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new FloatMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.FLOAT, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + + field.setNextDocId(3); + assertEquals(0, field.size()); + Exception e = expectThrows(IllegalArgumentException.class, scriptDocValues::getVectorValues); + assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, scriptDocValues::getMagnitudes); + assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage()); + } + + public void testByteMissingValues() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] magnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE, IndexVersion.current()); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new ByteMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.BYTE, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + + field.setNextDocId(3); + assertEquals(0, field.size()); + Exception e = expectThrows(IllegalArgumentException.class, scriptDocValues::getVectorValues); + assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, scriptDocValues::getMagnitudes); + assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage()); + } + + public void testFloatGetFunctionIsNotAccessible() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] magnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, IndexVersion.current()); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new FloatMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.FLOAT, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + + field.setNextDocId(0); + Exception e = expectThrows(UnsupportedOperationException.class, () -> 
scriptDocValues.get(0)); + assertThat( + e.getMessage(), + containsString( + "accessing a multi-vector field's value through 'get' or 'value' is not supported," + + " use 'vectorValues' or 'magnitudes' instead." + ) + ); + } + + public void testByteGetFunctionIsNotAccessible() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] magnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE, IndexVersion.current()); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new ByteMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.BYTE, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + + field.setNextDocId(0); + Exception e = expectThrows(UnsupportedOperationException.class, () -> scriptDocValues.get(0)); + assertThat( + e.getMessage(), + containsString( + "accessing a multi-vector field's value through 'get' or 'value' is not supported," + + " use 'vectorValues' or 'magnitudes' instead." + ) + ); + } + + public static BinaryDocValues wrap(float[][] magnitudes) { + return new BinaryDocValues() { + int idx = -1; + int maxIdx = magnitudes.length; + + @Override + public BytesRef binaryValue() { + if (idx >= maxIdx) { + throw new IllegalStateException("max index exceeded"); + } + ByteBuffer magnitudeBuffer = ByteBuffer.allocate(magnitudes[idx].length * Float.BYTES).order(ByteOrder.LITTLE_ENDIAN); + for (float magnitude : magnitudes[idx]) { + magnitudeBuffer.putFloat(magnitude); + } + return new BytesRef(magnitudeBuffer.array()); + } + + @Override + public boolean advanceExact(int target) { + idx = target; + if (target < maxIdx) { + return true; + } + return false; + } + + @Override + public int docID() { + return idx; + } + + @Override + public int nextDoc() { + return idx++; + } + + @Override + public int advance(int target) { + throw new IllegalArgumentException("not defined!"); + } + + @Override + public long cost() { + throw new IllegalArgumentException("not defined!"); + } + }; + } + + public static BinaryDocValues wrap(float[][][] vectors, ElementType elementType, IndexVersion indexVersion) { + return new BinaryDocValues() { + int idx = -1; + int maxIdx = vectors.length; + + @Override + public BytesRef binaryValue() { + if (idx >= maxIdx) { + throw new IllegalStateException("max index exceeded"); + } + return mockEncodeDenseVector(vectors[idx], elementType, indexVersion); + } + + @Override + public boolean advanceExact(int target) { + idx = target; + if (target < maxIdx) { + return true; + } + return false; + } + + @Override + public int docID() { + return idx; + } + + @Override + public int nextDoc() { + return idx++; + } + + @Override + public int advance(int target) { + throw new IllegalArgumentException("not defined!"); + } + + @Override + public long cost() { + throw new IllegalArgumentException("not defined!"); + } + }; + } + + public static BytesRef mockEncodeDenseVector(float[][] values, ElementType elementType, IndexVersion indexVersion) { + int dims = values[0].length; + if (elementType == ElementType.BIT) { + dims *= Byte.SIZE; + } + int numBytes = elementType.getNumBytes(dims); + ByteBuffer byteBuffer = elementType.createByteBuffer(indexVersion, numBytes * values.length); + for (float[] vector : values) { + for (float value : vector) { + if (elementType == ElementType.FLOAT) { + byteBuffer.putFloat(value); + } else if 
(elementType == ElementType.BYTE || elementType == ElementType.BIT) { + byteBuffer.put((byte) value); + } else { + throw new IllegalStateException("unknown element_type [" + elementType + "]"); + } + } + } + return new BytesRef(byteBuffer.array()); + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RankDocsQueryBuilderTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java rename to server/src/test/java/org/elasticsearch/index/query/RankDocsQueryBuilderTests.java index e8f88f3297b78..ba39702d3d162 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/RankDocsQueryBuilderTests.java @@ -7,7 +7,7 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -package org.elasticsearch.search.retriever.rankdoc; +package org.elasticsearch.index.query; import org.apache.lucene.document.Document; import org.apache.lucene.document.NumericDocValuesField; @@ -22,9 +22,8 @@ import org.apache.lucene.search.TopScoreDocCollectorManager; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java index 6198d6580cb3d..b62fff2eceb28 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java @@ -10,7 +10,6 @@ package org.elasticsearch.ingest; import org.elasticsearch.cluster.DiffableUtils; -import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; @@ -53,18 +52,16 @@ public void testFromXContent() throws IOException { builder.endObject(); XContentBuilder shuffled = shuffleXContent(builder); try (XContentParser parser = createParser(shuffled)) { - Metadata.Custom custom = IngestMetadata.fromXContent(parser); - assertTrue(custom instanceof IngestMetadata); - IngestMetadata m = (IngestMetadata) custom; - assertEquals(2, m.getPipelines().size()); - assertEquals("1", m.getPipelines().get("1").getId()); - assertEquals("2", m.getPipelines().get("2").getId()); - assertEquals(pipeline.getConfigAsMap(), m.getPipelines().get("1").getConfigAsMap()); - assertEquals(pipeline2.getConfigAsMap(), m.getPipelines().get("2").getConfigAsMap()); + IngestMetadata custom = IngestMetadata.fromXContent(parser); + assertEquals(2, custom.getPipelines().size()); + assertEquals("1", custom.getPipelines().get("1").getId()); + assertEquals("2", custom.getPipelines().get("2").getId()); + assertEquals(pipeline.getConfigAsMap(), custom.getPipelines().get("1").getConfigAsMap()); + assertEquals(pipeline2.getConfigAsMap(), custom.getPipelines().get("2").getConfigAsMap()); } } - public void testDiff() throws Exception { + public void testDiff() { BytesReference pipelineConfig = new BytesArray("{}"); 
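// Sketch of the contract exercised by the testMaybeUpgradeProcessors_* tests added
// below, using only the call shape those tests show: the upgrader is handed each
// "foo" processor's config map across all pipelines and returns whether it changed
// anything; when nothing changes, the original IngestMetadata instance is returned
// (assertSame), otherwise a rebuilt instance with the updated pipeline configs.
IngestMetadata upgraded = ingestMetadata.maybeUpgradeProcessors( // ingestMetadata: any instance
    "foo",
    config -> config.putIfAbsent("fooString", "new") == null
);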
Map pipelines = new HashMap<>(); @@ -79,7 +76,7 @@ public void testDiff() throws Exception { IngestMetadata ingestMetadata2 = new IngestMetadata(pipelines); IngestMetadata.IngestMetadataDiff diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata2.diff(ingestMetadata1); - DiffableUtils.MapDiff pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; + DiffableUtils.MapDiff pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; assertThat(pipelinesDiff.getDeletes(), contains("2")); assertThat(Maps.ofEntries(pipelinesDiff.getUpserts()), allOf(aMapWithSize(2), hasKey("3"), hasKey("4"))); @@ -96,7 +93,7 @@ public void testDiff() throws Exception { IngestMetadata ingestMetadata3 = new IngestMetadata(pipelines); diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata3.diff(ingestMetadata1); - pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; + pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; assertThat(pipelinesDiff.getDeletes(), empty()); assertThat(pipelinesDiff.getUpserts(), empty()); @@ -112,7 +109,7 @@ public void testDiff() throws Exception { IngestMetadata ingestMetadata4 = new IngestMetadata(pipelines); diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata4.diff(ingestMetadata1); - pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; + pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; assertThat(Maps.ofEntries(pipelinesDiff.getDiffs()), allOf(aMapWithSize(1), hasKey("2"))); endResult = (IngestMetadata) diff.apply(ingestMetadata4); @@ -138,4 +135,169 @@ public void testChunkedToXContent() { response -> 2 + response.getPipelines().size() ); } + + public void testMaybeUpgradeProcessors_appliesUpgraderToSingleProcessor() { + String originalPipelineConfig = """ + { + "processors": [ + { + "foo": { + "fooNumber": 123 + } + }, + { + "bar": { + "barNumber": 456 + } + } + ] + } + """; + IngestMetadata originalMetadata = new IngestMetadata( + Map.of("pipeline1", new PipelineConfiguration("pipeline1", new BytesArray(originalPipelineConfig), XContentType.JSON)) + ); + IngestMetadata upgradedMetadata = originalMetadata.maybeUpgradeProcessors( + "foo", + config -> config.putIfAbsent("fooString", "new") == null + ); + String expectedPipelineConfig = """ + { + "processors": [ + { + "foo": { + "fooNumber": 123, + "fooString": "new" + } + }, + { + "bar": { + "barNumber": 456 + } + } + ] + } + """; + IngestMetadata expectedMetadata = new IngestMetadata( + Map.of("pipeline1", new PipelineConfiguration("pipeline1", new BytesArray(expectedPipelineConfig), XContentType.JSON)) + ); + assertEquals(expectedMetadata, upgradedMetadata); + } + + public void testMaybeUpgradeProcessors_returnsSameObjectIfNoUpgradeNeeded() { + String originalPipelineConfig = """ + { + "processors": [ + { + "foo": { + "fooNumber": 123, + "fooString": "old" + } + }, + { + "bar": { + "barNumber": 456 + } + } + ] + } + """; + IngestMetadata originalMetadata = new IngestMetadata( + Map.of("pipeline1", new PipelineConfiguration("pipeline1", new BytesArray(originalPipelineConfig), XContentType.JSON)) + ); + IngestMetadata upgradedMetadata = originalMetadata.maybeUpgradeProcessors( + "foo", + config -> config.putIfAbsent("fooString", "new") == null + ); + assertSame(originalMetadata, upgradedMetadata); + } + + public void testMaybeUpgradeProcessors_appliesUpgraderToMultipleProcessorsInMultiplePipelines() { + String originalPipelineConfig1 = """ + { + "description": "A pipeline with a foo and a bar processor in different list items", + "processors": [ + { + "foo": { + "fooNumber": 123 + } + }, + { + "bar": { + 
"barNumber": 456 + } + } + ] + } + """; + String originalPipelineConfig2 = """ + { + "description": "A pipeline with a foo and a qux processor in the same list item", + "processors": [ + { + "foo": { + "fooNumber": 321 + }, + "qux": { + "quxNumber": 654 + } + } + ] + } + """; + IngestMetadata originalMetadata = new IngestMetadata( + Map.of( + "pipeline1", + new PipelineConfiguration("pipeline1", new BytesArray(originalPipelineConfig1), XContentType.JSON), + "pipeline2", + new PipelineConfiguration("pipeline2", new BytesArray(originalPipelineConfig2), XContentType.JSON) + ) + ); + IngestMetadata upgradedMetadata = originalMetadata.maybeUpgradeProcessors( + "foo", + config -> config.putIfAbsent("fooString", "new") == null + ); + String expectedPipelineConfig1 = """ + { + "description": "A pipeline with a foo and a bar processor in different list items", + "processors": [ + { + "foo": { + "fooNumber": 123, + "fooString": "new" + } + }, + { + "bar": { + "barNumber": 456 + } + } + ] + } + """; + String expectedPipelineConfig2 = """ + { + "description": "A pipeline with a foo and a qux processor in the same list item", + "processors": [ + { + "foo": { + "fooNumber": 321, + "fooString": "new" + }, + "qux": { + "quxNumber": 654 + } + } + ] + } + """; + IngestMetadata expectedMetadata = new IngestMetadata( + Map.of( + "pipeline1", + new PipelineConfiguration("pipeline1", new BytesArray(expectedPipelineConfig1), XContentType.JSON), + "pipeline2", + new PipelineConfiguration("pipeline2", new BytesArray(expectedPipelineConfig2), XContentType.JSON) + ) + ); + assertEquals(expectedMetadata, upgradedMetadata); + } } diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java index aa47f3c066f57..7f6885e7a977f 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java @@ -65,14 +65,26 @@ public void setUp() throws Exception { } public void testCanRegisterTwoRepositoriesWithDifferentTypes() { - when(plugin1.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type1", factory)); - when(plugin2.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type2", factory)); + when( + plugin1.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type1", factory)); + when( + plugin2.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type2", factory)); // Would throw new RepositoriesModule( @@ -83,18 +95,32 @@ public void testCanRegisterTwoRepositoriesWithDifferentTypes() { mock(ClusterService.class), MockBigArrays.NON_RECYCLING_INSTANCE, contentRegistry, - recoverySettings, TelemetryProvider.NOOP); + recoverySettings, + TelemetryProvider.NOOP + ); } public void testCannotRegisterTwoRepositoriesWithSameTypes() { - 
when(plugin1.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type1", factory)); - when(plugin2.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type1", factory)); + when( + plugin1.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type1", factory)); + when( + plugin2.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type1", factory)); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, @@ -106,7 +132,9 @@ public void testCannotRegisterTwoRepositoriesWithSameTypes() { clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, contentRegistry, - recoverySettings, TelemetryProvider.NOOP) + recoverySettings, + TelemetryProvider.NOOP + ) ); assertEquals("Repository type [type1] is already registered", ex.getMessage()); @@ -130,17 +158,25 @@ public void testCannotRegisterTwoInternalRepositoriesWithSameTypes() { clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, contentRegistry, - recoverySettings, TelemetryProvider.NOOP) + recoverySettings, + TelemetryProvider.NOOP + ) ); assertEquals("Internal repository type [type1] is already registered", ex.getMessage()); } public void testCannotRegisterNormalAndInternalRepositoriesWithSameTypes() { - when(plugin1.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type1", factory)); + when( + plugin1.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type1", factory)); when(plugin2.getInternalRepositories(environment, contentRegistry, clusterService, recoverySettings)).thenReturn( Collections.singletonMap("type1", factory) ); @@ -155,7 +191,9 @@ public void testCannotRegisterNormalAndInternalRepositoriesWithSameTypes() { clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, contentRegistry, - recoverySettings, TelemetryProvider.NOOP) + recoverySettings, + TelemetryProvider.NOOP + ) ); assertEquals("Internal repository type [type1] is already registered as a non-internal repository", ex.getMessage()); diff --git a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java index 9f82911ed121f..8a8bed9ca73db 100644 --- a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -73,7 +73,7 @@ public List routes() { params.put("consumed", randomAlphaOfLength(8)); params.put("unconsumed", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, 
randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) @@ -108,7 +108,7 @@ public List routes() { params.put("unconsumed-first", randomAlphaOfLength(8)); params.put("unconsumed-second", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) @@ -155,7 +155,7 @@ public List routes() { params.put("very_close_to_parametre", randomAlphaOfLength(8)); params.put("very_far_from_every_consumed_parameter", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) @@ -206,7 +206,7 @@ public List routes() { params.put("consumed", randomAlphaOfLength(8)); params.put("response_param", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -238,7 +238,7 @@ public List routes() { params.put("human", null); params.put("error_trace", randomFrom("true", "false", null)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -283,7 +283,7 @@ public List routes() { params.put("size", randomAlphaOfLength(8)); params.put("time", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -314,7 +314,7 @@ public List routes() { new BytesArray(builder.toString()), XContentType.JSON ).build(); - final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final RestChannel channel = new FakeRestChannel(request, true, 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -341,7 +341,7 @@ public List routes() { }; final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).build(); - final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final RestChannel channel = new FakeRestChannel(request, true, 1); handler.handleRequest(request, channel, mockClient); 
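// These channels now pin detailedErrorsEnabled to true instead of randomBoolean().
// A plausible reading, based on the RestResponseTests change later in this diff:
// building an error response on a channel with detailed errors disabled emits a
// deprecation warning ("The JSON format of non-detailed errors will change in
// Elasticsearch 9.0..."), so tests that do not assert that warning keep the flag on.
RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build();
RestChannel channel = new FakeRestChannel(request, true, 1); // true = detailed errors enabled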
assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -371,7 +371,7 @@ public List routes() { new BytesArray(builder.toString()), XContentType.JSON ).build(); - final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final RestChannel channel = new FakeRestChannel(request, true, 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) diff --git a/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java b/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java index eece90ed94cf9..907c16aad5fdc 100644 --- a/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java +++ b/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java @@ -56,7 +56,7 @@ public void testEncodesChunkedXContentCorrectly() throws IOException { ToXContent.EMPTY_PARAMS, new FakeRestChannel( new FakeRestRequest.Builder(xContentRegistry()).withContent(BytesArray.EMPTY, randomXContent.type()).build(), - randomBoolean(), + true, 1 ) ); diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 8f1904ce42438..afdad1045b4de 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -161,7 +161,7 @@ public void testApplyProductSpecificResponseHeaders() { final ThreadContext threadContext = client.threadPool().getThreadContext(); final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); // the rest controller relies on the caller to stash the context, so we should expect these values here as we didn't stash the // context in this test @@ -180,7 +180,7 @@ public void testRequestWithDisallowedMultiValuedHeader() { restHeaders.put("header.1", Collections.singletonList("boo")); restHeaders.put("header.2", List.of("foo", "bar")); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); assertTrue(channel.getSendResponseCalled()); } @@ -211,7 +211,7 @@ public String getName() { }); } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.OK); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy( eq(1L), @@ -235,7 +235,7 @@ public MethodHandlers next() { return null; } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); 
verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); } @@ -257,7 +257,7 @@ public MethodHandlers next() { } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); } @@ -280,7 +280,7 @@ public String getName() { })); when(spyRestController.getAllHandlers(any(), eq(fakeRequest.rawPath()))).thenAnswer(x -> handlers.iterator()); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.METHOD_NOT_ALLOWED); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.METHOD_NOT_ALLOWED); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 405))); } @@ -290,7 +290,7 @@ public void testDispatchBadRequestEmitsMetric() { final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); restController.dispatchBadRequest(channel, threadContext, new Exception()); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); } @@ -314,7 +314,7 @@ public MethodHandlers next() { return new MethodHandlers("/").addMethod(GET, RestApiVersion.current(), (request, channel, client) -> {}); } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); verify(tracer).startTrace( eq(threadContext), @@ -340,7 +340,7 @@ public void testRequestWithDisallowedMultiValuedHeaderButSameValues() { new RestResponse(RestStatus.OK, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY) ) ); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.OK); restController.dispatchRequest(fakeRequest, channel, threadContext); assertTrue(channel.getSendResponseCalled()); } @@ -831,7 +831,7 @@ public void testFavicon() { final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withMethod(GET) .withPath("/favicon.ico") .build(); - final AssertingChannel channel = new AssertingChannel(fakeRestRequest, false, RestStatus.OK); + final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); restController.dispatchRequest(fakeRestRequest, channel, client.threadPool().getThreadContext()); assertTrue(channel.getSendResponseCalled()); assertThat(channel.getRestResponse().contentType(), containsString("image/x-icon")); @@ -1115,7 +1115,7 @@ public void testApiProtectionWithServerlessDisabled() { List accessiblePaths = List.of("/public", "/internal", "/hidden"); accessiblePaths.forEach(path -> { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); - AssertingChannel channel = new AssertingChannel(request, false, RestStatus.OK); + AssertingChannel channel 
= new AssertingChannel(request, true, RestStatus.OK); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); }); } @@ -1137,7 +1137,7 @@ public void testApiProtectionWithServerlessEnabledAsEndUser() { final Consumer> checkUnprotected = paths -> paths.forEach(path -> { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); - AssertingChannel channel = new AssertingChannel(request, false, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); }); final Consumer> checkProtected = paths -> paths.forEach(path -> { diff --git a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java index 3b839896bc34f..4345f3c5e3fb4 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java @@ -97,7 +97,7 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception { RestRequest restRequest = fakeRestRequestBuilder.build(); // Send the request and verify the response status code - FakeRestChannel restChannel = new FakeRestChannel(restRequest, false, 1); + FakeRestChannel restChannel = new FakeRestChannel(restRequest, true, 1); restController.dispatchRequest(restRequest, restChannel, new ThreadContext(Settings.EMPTY)); assertThat(restChannel.capturedResponse().status().getStatus(), is(405)); diff --git a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java index c65fd85307ece..cfed83f352951 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java @@ -93,6 +93,7 @@ public void testWithHeaders() throws Exception { assertThat(response.getHeaders().get("n1"), contains("v11", "v12")); assertThat(response.getHeaders().get("n2"), notNullValue()); assertThat(response.getHeaders().get("n2"), contains("v21", "v22")); + assertChannelWarnings(channel); } public void testEmptyChunkedBody() { @@ -117,6 +118,7 @@ public void testSimpleExceptionMessage() throws Exception { assertThat(text, not(containsString("FileNotFoundException"))); assertThat(text, not(containsString("/foo/bar"))); assertThat(text, not(containsString("error_trace"))); + assertChannelWarnings(channel); } public void testDetailedExceptionMessage() throws Exception { @@ -143,6 +145,7 @@ public void testNonElasticsearchExceptionIsNotShownAsSimpleMessage() throws Exce assertThat(text, not(containsString("FileNotFoundException[/foo/bar]"))); assertThat(text, not(containsString("error_trace"))); assertThat(text, containsString("\"error\":\"No ElasticsearchException found\"")); + assertChannelWarnings(channel); } public void testErrorTrace() throws Exception { @@ -174,6 +177,7 @@ public void testAuthenticationFailedNoStackTrace() throws IOException { RestResponse response = new RestResponse(channel, authnException); assertThat(response.status(), is(RestStatus.UNAUTHORIZED)); assertThat(response.content().utf8ToString(), not(containsString(ElasticsearchException.STACK_TRACE))); + assertChannelWarnings(channel); } } } @@ -198,6 +202,7 @@ public void testStackTrace() throws IOException { } else { assertThat(response.content().utf8ToString(), 
@@ -198,6 +202,7 @@ public void testStackTrace() throws IOException { } else { assertThat(response.content().utf8ToString(), not(containsString(ElasticsearchException.STACK_TRACE))); } + assertChannelWarnings(channel); } } } @@ -229,6 +234,7 @@ public void testNullThrowable() throws Exception { String text = response.content().utf8ToString(); assertThat(text, containsString("\"error\":\"unknown\"")); assertThat(text, not(containsString("error_trace"))); + assertChannelWarnings(channel); } public void testConvert() throws IOException { @@ -429,6 +435,7 @@ public void testErrorToAndFromXContent() throws IOException { assertEquals(expected.status(), parsedError.status()); assertDeepEquals(expected, parsedError); + assertChannelWarnings(channel); } public void testNoErrorFromXContent() throws IOException { @@ -495,6 +502,7 @@ public void testResponseContentTypeUponException() throws Exception { Exception t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); RestResponse response = new RestResponse(channel, t); assertThat(response.contentType(), equalTo(mediaType)); + assertChannelWarnings(channel); } public void testSupressedLogging() throws IOException { @@ -526,6 +534,7 @@ public void testSupressedLogging() throws IOException { "401", "unauthorized" ); + assertChannelWarnings(channel); } private void assertLogging( @@ -551,6 +560,15 @@ private void assertLogging( } } + private void assertChannelWarnings(RestChannel channel) { + if (channel.detailedErrorsEnabled() == false) { + assertWarnings( + "The JSON format of non-detailed errors will change in Elasticsearch 9.0" + + " to match the JSON structure used for detailed errors. To keep using the existing format, use the V8 REST API." + ); + } + } + public static class WithHeadersException extends ElasticsearchException { WithHeadersException() { diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java index 827a07b89b2b8..03ae366050646 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java @@ -26,7 +26,7 @@ public class RestBuilderListenerTests extends ESTestCase { public void testXContentBuilderClosedInBuildResponse() throws Exception { AtomicReference<XContentBuilder> builderAtomicReference = new AtomicReference<>(); RestBuilderListener<Empty> builderListener = new RestBuilderListener<Empty>( - new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1) + new FakeRestChannel(new FakeRestRequest(), true, 1) ) { @Override public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { @@ -44,7 +44,7 @@ public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws E public void testXContentBuilderNotClosedInBuildResponseAssertionsDisabled() throws Exception { AtomicReference<XContentBuilder> builderAtomicReference = new AtomicReference<>(); RestBuilderListener<Empty> builderListener = new RestBuilderListener<Empty>( - new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1) + new FakeRestChannel(new FakeRestRequest(), true, 1) ) { @Override public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { @@ -68,7 +68,7 @@ public void testXContentBuilderNotClosedInBuildResponseAssertionsEnabled() throw assumeTrue("tests are not being run with assertions", RestBuilderListener.class.desiredAssertionStatus()); RestBuilderListener<Empty> builderListener = new RestBuilderListener<Empty>( - new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1) + new FakeRestChannel(new 
FakeRestRequest(), true, 1) ) { @Override public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java index 880a0bc9eabd7..8104ecfc31c3d 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java @@ -34,7 +34,7 @@ public void testConsumesParameters() throws Exception { FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams( Map.of("parent_task_id", "the node:3", "nodes", "node1,node2", "actions", "*") ).build(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, false, 1); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, 1); try (var threadPool = createThreadPool()) { final var nodeClient = buildNodeClient(threadPool); action.handleRequest(fakeRestRequest, fakeRestChannel, nodeClient); diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java index 3b6b280565da5..0d35e4311032d 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java @@ -222,7 +222,7 @@ public void next() { }) .withHeaders(Map.of("Content-Type", Collections.singletonList("application/json"))) .build(); - FakeRestChannel channel = new FakeRestChannel(request, false, 1); + FakeRestChannel channel = new FakeRestChannel(request, true, 1); RestBulkAction.ChunkHandler chunkHandler = new RestBulkAction.ChunkHandler( true, diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java index 24f59a8c3abe7..4822b1c64cf41 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java @@ -51,7 +51,7 @@ public void testEnableFieldsEmulationNoErrors() throws Exception { .withParams(params) .build(); - action.handleRequest(request, new FakeRestChannel(request, false, 1), verifyingClient); + action.handleRequest(request, new FakeRestChannel(request, true, 1), verifyingClient); } public void testValidateSearchRequest() { diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index 5371893993318..7c9a68cbc91f1 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.mapper.DateFieldMapper.Resolution; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.test.ESTestCase; @@ -43,8 +43,8 @@ public class DocValueFormatTests extends ESTestCase { public void testSerialization() throws 
Exception { List<Entry> entries = new ArrayList<>(); entries.add(new Entry(DocValueFormat.class, DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN)); - entries.add(new Entry(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new)); - entries.add(new Entry(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::readFrom)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::readFrom)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOTILE.getWriteableName(), in -> DocValueFormat.GEOTILE)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP)); @@ -379,11 +379,11 @@ public void testParseZone() { } public void testParseTsid() throws IOException { - TimeSeriesIdBuilder timeSeriesIdBuilder = new TimeSeriesIdBuilder(null); - timeSeriesIdBuilder.addString("string", randomAlphaOfLength(10)); - timeSeriesIdBuilder.addLong("long", randomLong()); - timeSeriesIdBuilder.addUnsignedLong("ulong", randomLong()); - BytesRef expected = timeSeriesIdBuilder.buildTsidHash().toBytesRef(); + var routingFields = new RoutingPathFields(null); + routingFields.addString("string", randomAlphaOfLength(10)); + routingFields.addLong("long", randomLong()); + routingFields.addUnsignedLong("ulong", randomLong()); + BytesRef expected = routingFields.buildHash().toBytesRef(); byte[] expectedBytes = new byte[expected.length]; System.arraycopy(expected.bytes, 0, expectedBytes, 0, expected.length); BytesRef actual = DocValueFormat.TIME_SERIES_ID.parseBytesRef(expected);
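The testParseTsid rewrite above shows the shape of a rename that recurs later in this patch: TimeSeriesIdFieldMapper.TimeSeriesIdBuilder becomes the standalone RoutingPathFields, and buildTsidHash() becomes buildHash(). A minimal sketch using only calls visible in the diff (field names are illustrative; the null constructor argument mirrors the tests):

    var routingFields = new RoutingPathFields(null);
    routingFields.addString("host", "a1");
    routingFields.addLong("pod", 7L);
    // hashed _tsid value, as fed to DocValueFormat.TIME_SERIES_ID above
    BytesRef tsid = routingFields.buildHash().toBytesRef();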
diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java deleted file mode 100644 index 15f5ed0d800d2..0000000000000 --- a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.search.TelemetryMetrics; - -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.telemetry.Measurement; -import org.elasticsearch.telemetry.TestTelemetryPlugin; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.junit.After; -import org.junit.Before; - -import java.util.Collection; -import java.util.List; - -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.DFS_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ID_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; - -public class SearchTransportTelemetryTests extends ESSingleNodeTestCase { - - private static final String indexName = "test_search_metrics2"; - private final int num_primaries = randomIntBetween(2, 7); - - @Override - protected boolean resetNodeAfterTest() { - return true; - } - - @Before - private void setUpIndex() throws Exception { - createIndex( - indexName, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, num_primaries) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .build() - ); - ensureGreen(indexName); - - prepareIndex(indexName).setId("1").setSource("body", "doc1").setRefreshPolicy(IMMEDIATE).get(); - prepareIndex(indexName).setId("2").setSource("body", "doc2").setRefreshPolicy(IMMEDIATE).get(); - } - - @After - private void afterTest() { - resetMeter(); - } - - @Override - protected Collection<Class<? extends Plugin>> getPlugins() { - return pluginList(TestTelemetryPlugin.class); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103810") - public void testSearchTransportMetricsDfsQueryThenFetch() throws InterruptedException { - assertSearchHitsWithoutFailures( - client().prepareSearch(indexName).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("doc1")), - "1" - ); - assertEquals(num_primaries, getNumberOfMeasurements(DFS_ACTION_METRIC)); - assertEquals(num_primaries, getNumberOfMeasurements(QUERY_ID_ACTION_METRIC)); - assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); - resetMeter(); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103810") - public void testSearchTransportMetricsQueryThenFetch() throws InterruptedException { - assertSearchHitsWithoutFailures( - client().prepareSearch(indexName).setSearchType(SearchType.QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("doc1")), - "1" - ); - 
assertEquals(num_primaries, getNumberOfMeasurements(QUERY_ACTION_METRIC)); - assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); - resetMeter(); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103810") - public void testSearchTransportMetricsScroll() throws InterruptedException { - assertScrollResponsesAndHitCount( - client(), - TimeValue.timeValueSeconds(60), - client().prepareSearch(indexName) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setSize(1) - .setQuery(simpleQueryStringQuery("doc1 doc2")), - 2, - (respNum, response) -> { - if (respNum == 1) { - assertEquals(num_primaries, getNumberOfMeasurements(DFS_ACTION_METRIC)); - assertEquals(num_primaries, getNumberOfMeasurements(QUERY_ID_ACTION_METRIC)); - assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); - } else if (respNum == 2) { - assertEquals(num_primaries, getNumberOfMeasurements(QUERY_SCROLL_ACTION_METRIC)); - assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_SCROLL_ACTION_METRIC)); - } - resetMeter(); - } - ); - - assertEquals(num_primaries, getNumberOfMeasurements(FREE_CONTEXT_SCROLL_ACTION_METRIC)); - resetMeter(); - } - - private void resetMeter() { - getTestTelemetryPlugin().resetMeter(); - } - - private TestTelemetryPlugin getTestTelemetryPlugin() { - return getInstanceFromNode(PluginsService.class).filterPlugins(TestTelemetryPlugin.class).toList().get(0); - } - - private long getNumberOfMeasurements(String attributeValue) { - final List<Measurement> measurements = getTestTelemetryPlugin().getLongHistogramMeasurement( - org.elasticsearch.action.search.SearchTransportAPMMetrics.SEARCH_ACTION_LATENCY_BASE_METRIC - ); - return measurements.stream() - .filter( - m -> m.attributes().get(org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME) == attributeValue - ) - .count(); - } -}
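The deleted transport-level test above is superseded by the shard-phase test that follows: instead of counting latency measurements per transport action, it reads one histogram per search phase and checks a boolean attribute recording whether the phase ran on a system-index thread. A sketch of the lookup the new test relies on (names taken from the new file):

    List<Measurement> queryPhase = getTestTelemetryPlugin().getLongHistogramMeasurement(QUERY_SEARCH_PHASE_METRIC);
    // each data point carries the executing-thread attribute:
    boolean onSystemThread = (boolean) queryPhase.get(0).attributes().get(SYSTEM_THREAD_ATTRIBUTE_NAME);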
diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/ShardSearchPhaseAPMMetricsTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/ShardSearchPhaseAPMMetricsTests.java new file mode 100644 index 0000000000000..80bb7ebc8ddb8 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/ShardSearchPhaseAPMMetricsTests.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.search.TelemetryMetrics; + +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.indices.ExecutorNames; +import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.Collection; +import java.util.List; +import java.util.stream.Stream; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; +import static org.elasticsearch.index.search.stats.ShardSearchPhaseAPMMetrics.FETCH_SEARCH_PHASE_METRIC; +import static org.elasticsearch.index.search.stats.ShardSearchPhaseAPMMetrics.QUERY_SEARCH_PHASE_METRIC; +import static org.elasticsearch.index.search.stats.ShardSearchPhaseAPMMetrics.SYSTEM_THREAD_ATTRIBUTE_NAME; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; + +public class ShardSearchPhaseAPMMetricsTests extends ESSingleNodeTestCase { + + private static final String indexName = "test_search_metrics2"; + private final int num_primaries = randomIntBetween(2, 7); + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Before + private void setUpIndex() throws Exception { + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, num_primaries) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(indexName); + + prepareIndex(indexName).setId("1").setSource("body", "doc1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("2").setSource("body", "doc2").setRefreshPolicy(IMMEDIATE).get(); + + prepareIndex(TestSystemIndexPlugin.INDEX_NAME).setId("1").setSource("body", "doc1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(TestSystemIndexPlugin.INDEX_NAME).setId("2").setSource("body", "doc2").setRefreshPolicy(IMMEDIATE).get(); + } + + @After + private void afterTest() { + resetMeter(); + } + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return pluginList(TestTelemetryPlugin.class, TestSystemIndexPlugin.class); + } + + public void testMetricsDfsQueryThenFetch() throws InterruptedException { + checkMetricsDfsQueryThenFetch(indexName, false); + } + + public void testMetricsDfsQueryThenFetchSystem() throws InterruptedException { + checkMetricsDfsQueryThenFetch(TestSystemIndexPlugin.INDEX_NAME, true); + } + + private void checkMetricsDfsQueryThenFetch(String indexName, boolean isSystemIndex) throws InterruptedException { + assertSearchHitsWithoutFailures( + client().prepareSearch(indexName).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("doc1")), + "1" + ); + checkNumberOfMeasurementsForPhase(QUERY_SEARCH_PHASE_METRIC, isSystemIndex); + assertNotEquals(0, getNumberOfMeasurementsForPhase(FETCH_SEARCH_PHASE_METRIC)); + checkMetricsAttributes(isSystemIndex); + } + + public void testSearchTransportMetricsQueryThenFetch() 
throws InterruptedException { + checkSearchTransportMetricsQueryThenFetch(indexName, false); + } + + public void testSearchTransportMetricsQueryThenFetchSystem() throws InterruptedException { + checkSearchTransportMetricsQueryThenFetch(TestSystemIndexPlugin.INDEX_NAME, true); + } + + private void checkSearchTransportMetricsQueryThenFetch(String indexName, boolean isSystemIndex) throws InterruptedException { + assertSearchHitsWithoutFailures( + client().prepareSearch(indexName).setSearchType(SearchType.QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("doc1")), + "1" + ); + checkNumberOfMeasurementsForPhase(QUERY_SEARCH_PHASE_METRIC, isSystemIndex); + assertNotEquals(0, getNumberOfMeasurementsForPhase(FETCH_SEARCH_PHASE_METRIC)); + checkMetricsAttributes(isSystemIndex); + } + + public void testSearchTransportMetricsScroll() throws InterruptedException { + checkSearchTransportMetricsScroll(indexName, false); + } + + public void testSearchTransportMetricsScrollSystem() throws InterruptedException { + checkSearchTransportMetricsScroll(TestSystemIndexPlugin.INDEX_NAME, true); + } + + private void checkSearchTransportMetricsScroll(String indexName, boolean isSystemIndex) throws InterruptedException { + assertScrollResponsesAndHitCount( + client(), + TimeValue.timeValueSeconds(60), + client().prepareSearch(indexName) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setSize(1) + .setQuery(simpleQueryStringQuery("doc1 doc2")), + 2, + (respNum, response) -> { + // No hits, no fetching done + assertEquals(isSystemIndex ? 1 : num_primaries, getNumberOfMeasurementsForPhase(QUERY_SEARCH_PHASE_METRIC)); + if (response.getHits().getHits().length > 0) { + assertNotEquals(0, getNumberOfMeasurementsForPhase(FETCH_SEARCH_PHASE_METRIC)); + } else { + assertEquals(isSystemIndex ? 1 : 0, getNumberOfMeasurementsForPhase(FETCH_SEARCH_PHASE_METRIC)); + } + checkMetricsAttributes(isSystemIndex); + resetMeter(); + } + ); + + } + + private void resetMeter() { + getTestTelemetryPlugin().resetMeter(); + } + + private TestTelemetryPlugin getTestTelemetryPlugin() { + return getInstanceFromNode(PluginsService.class).filterPlugins(TestTelemetryPlugin.class).toList().get(0); + } + + private void checkNumberOfMeasurementsForPhase(String phase, boolean isSystemIndex) { + int numMeasurements = getNumberOfMeasurementsForPhase(phase); + assertEquals(isSystemIndex ? 
1 : num_primaries, numMeasurements); + } + + private int getNumberOfMeasurementsForPhase(String phase) { + final List<Measurement> measurements = getTestTelemetryPlugin().getLongHistogramMeasurement(phase); + return measurements.size(); + } + + private void checkMetricsAttributes(boolean isSystem) { + final List<Measurement> queryMeasurements = getTestTelemetryPlugin().getLongHistogramMeasurement(QUERY_SEARCH_PHASE_METRIC); + final List<Measurement> fetchMeasurements = getTestTelemetryPlugin().getLongHistogramMeasurement(FETCH_SEARCH_PHASE_METRIC); + assertTrue( + Stream.concat(queryMeasurements.stream(), fetchMeasurements.stream()).allMatch(m -> checkMeasurementAttributes(m, isSystem)) + ); + } + + private boolean checkMeasurementAttributes(Measurement m, boolean isSystem) { + return ((boolean) m.attributes().get(SYSTEM_THREAD_ATTRIBUTE_NAME)) == isSystem; + } + + public static class TestSystemIndexPlugin extends Plugin implements SystemIndexPlugin { + + static final String INDEX_NAME = ".test-system-index"; + + public TestSystemIndexPlugin() {} + + @Override + public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) { + return List.of( + SystemIndexDescriptor.builder() + .setIndexPattern(INDEX_NAME + "*") + .setPrimaryIndex(INDEX_NAME) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMappings(""" + { + "_meta": { + "version": "8.0.0", + "managed_index_mappings_version": 3 + }, + "properties": { + "body": { "type": "keyword" } + } + } + """) + .setThreadPools(ExecutorNames.DEFAULT_SYSTEM_INDEX_THREAD_POOLS) + .setOrigin(ShardSearchPhaseAPMMetricsTests.class.getSimpleName()) + .build() + ); + } + + @Override + public String getFeatureName() { + return ShardSearchPhaseAPMMetricsTests.class.getSimpleName(); + } + + @Override + public String getFeatureDescription() { + return "test plugin"; + } + } +}
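The FiltersAggregatorTests hunks below begin a cleanup repeated through the rest of this patch: every hand-rolled empty BitsetFilterCache.Listener is replaced by a shared BitsetFilterCache.Listener.NOOP constant. A sketch of what such a constant looks like, assuming it is declared on the listener interface itself:

    public interface Listener {
        void onCache(ShardId shardId, Accountable accountable);
        void onRemoval(ShardId shardId, Accountable accountable);

        // shared no-op instance standing in for the anonymous classes removed below
        Listener NOOP = new Listener() {
            @Override
            public void onCache(ShardId shardId, Accountable accountable) {}

            @Override
            public void onRemoval(ShardId shardId, Accountable accountable) {}
        };
    }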
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java index db32d796ea76a..ba186695bcdae 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.search.Queries; @@ -649,13 +648,7 @@ public void testMatchAllOnFilteredIndex() throws IOException { try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { final IndexSettings indexSettings = createIndexSettings(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); DirectoryReader limitedReader = new DocumentSubsetDirectoryReader( ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexSettings.getIndex(), 0)), bitsetFilterCache, @@ -721,13 +714,7 @@ public void testTermOnFilteredIndex() throws IOException { try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { final IndexSettings indexSettings = createIndexSettings(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); DirectoryReader limitedReader = new DocumentSubsetDirectoryReader( ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexSettings.getIndex(), 0)), bitsetFilterCache, @@ -790,13 +777,7 @@ public void testTermOnFilterWithMatchAll() throws IOException { try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { final IndexSettings indexSettings = createIndexSettings(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); DirectoryReader limitedReader = new DocumentSubsetDirectoryReader( ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexSettings.getIndex(), 0)), bitsetFilterCache, diff --git a/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java b/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java index c699e117ffbf4..d5e930321db95 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java @@ -77,7 +77,7 @@ protected SearchHit nextDoc(int doc) { } }; - SearchHit[] hits = it.iterate(null, reader, docs); + SearchHit[] hits = it.iterate(null, reader, docs, randomBoolean()); assertThat(hits.length, equalTo(docs.length)); for (int i = 0; i < hits.length; i++) { @@ -125,7 +125,7 @@ protected SearchHit nextDoc(int doc) { } }; - Exception e = expectThrows(FetchPhaseExecutionException.class, () -> it.iterate(null, reader, docs)); + Exception e = expectThrows(FetchPhaseExecutionException.class, () -> it.iterate(null, reader, docs, randomBoolean())); assertThat(e.getMessage(), containsString("Error running fetch phase for doc [" + badDoc + "]")); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); diff --git a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java index 9957d8c92b955..fe07cbf8efdfd 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java @@ -53,7 +53,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSetIterator; import org.apache.lucene.util.Bits; @@ -308,19 +307,8 @@ public void doTestContextIndexSearcher(boolean sparse, boolean deletions) throws 
w.deleteDocuments(new Term("delete", "yes")); IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY); - BitsetFilterCache.Listener listener = new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }; DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(w), new ShardId(settings.getIndex(), 0)); - BitsetFilterCache cache = new BitsetFilterCache(settings, listener); + BitsetFilterCache cache = new BitsetFilterCache(settings, BitsetFilterCache.Listener.NOOP); Query roleQuery = new TermQuery(new Term("allowed", "yes")); BitSet bitSet = cache.getBitSetProducer(roleQuery).getBitSet(reader.leaves().get(0)); if (sparse) { diff --git a/server/src/test/java/org/elasticsearch/search/rank/AbstractRankDocWireSerializingTestCase.java b/server/src/test/java/org/elasticsearch/search/rank/AbstractRankDocWireSerializingTestCase.java index d0c85a33acf09..8cc40570ab4bb 100644 --- a/server/src/test/java/org/elasticsearch/search/rank/AbstractRankDocWireSerializingTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/rank/AbstractRankDocWireSerializingTestCase.java @@ -12,8 +12,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java index b0bf7e6636498..7923cb5f0d918 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java @@ -17,11 +17,11 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.usage.SearchUsage; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java index 384564ac01e2a..af6782c45dce8 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java @@ -13,11 +13,11 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import 
org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.test.ESTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java b/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java index 0c95340fdb6f7..33978b4cd6b9f 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java @@ -54,7 +54,7 @@ public void clearScroll(ClearScrollRequest request, ActionListener stats( + ClusterState clusterState, + ClusterInfo clusterInfo, + @Nullable DesiredBalance desiredBalance + ) { + return Map.of(); + } + }; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java index 2e8dc287a4c40..fe1b08d5e738d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java @@ -9,7 +9,6 @@ package org.elasticsearch.index; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -20,7 +19,6 @@ import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.script.ScriptCompiler; @@ -62,13 +60,7 @@ public static MapperService newMapperService( IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexName, finalSettings); IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, finalSettings).indexAnalyzers; SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); return new MapperService( () -> TransportVersion.current(), indexSettings, diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index bf47efcad7b53..66d87f3532cbd 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -19,7 +19,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; import 
org.elasticsearch.common.Strings; @@ -283,13 +282,7 @@ public MapperService build() { getPlugins().stream().filter(p -> p instanceof MapperPlugin).map(p -> (MapperPlugin) p).collect(toList()) ).getMapperRegistry(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); var mapperService = new MapperService( () -> TransportVersion.current(), @@ -762,17 +755,11 @@ protected SearchExecutionContext createSearchExecutionContext(MapperService mapp IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); final SimilarityService similarityService = new SimilarityService(indexSettings, null, Map.of()); final long nowInMillis = randomNonNegativeLong(); - return new SearchExecutionContext(0, 0, indexSettings, new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }), + return new SearchExecutionContext( + 0, + 0, + indexSettings, + new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP), (ft, fdc) -> ft.fielddataBuilder(fdc).build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()), mapperService, mapperService.mappingLookup(), diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 7cd2e6e1cc82e..51f66418bb44b 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -48,12 +48,13 @@ import org.apache.lucene.tests.index.AssertingDirectoryReader; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.LuceneTestCase; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.TriConsumer; @@ -144,8 +145,10 @@ import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.subphase.FetchDocValuesPhase; import org.elasticsearch.search.fetch.subphase.FetchSourcePhase; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.internal.SubSearchContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; @@ -366,13 +369,7 @@ private AggregationContext createAggregationContext( context.fielddataOperation() ) ).build(new IndexFieldDataCache.None(), breakerService); - BitsetFilterCache 
bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); SearchExecutionContext searchExecutionContext = new SearchExecutionContext( 0, -1, @@ -473,7 +470,18 @@ private SubSearchContext buildSubSearchContext( .when(subContext) .getNestedDocuments(); when(ctx.getSearchExecutionContext()).thenReturn(subContext); - + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(randomBoolean()), + new ShardId("index", "indexUUID", 0), + 0, + 1, + AliasFilter.EMPTY, + 1f, + 0L, + null + ); + when(ctx.request()).thenReturn(request); IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(new ShardId("test", "test", 0)); when(indexShard.indexSettings()).thenReturn(indexSettings); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index ef6600032ca1b..bdf323afb8d96 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -13,7 +13,6 @@ import com.carrotsearch.randomizedtesting.SeedUtils; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.util.Accountable; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.MockResolvedIndices; import org.elasticsearch.action.OriginalIndices; @@ -58,7 +57,6 @@ import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.IndexLongFieldRange; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.DateFieldRangeInfo; @@ -486,13 +484,7 @@ private static class ServiceHolder implements Closeable { IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings); scriptService = new MockScriptService(Settings.EMPTY, scriptModule.engines, scriptModule.contexts); similarityService = new SimilarityService(idxSettings, null, Collections.emptyMap()); - this.bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + this.bitsetFilterCache = new BitsetFilterCache(idxSettings, BitsetFilterCache.Listener.NOOP); MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); mapperService = new MapperService( clusterService, diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 1edc800956a67..207409dfcf751 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -435,11 +435,6 @@ private static void setTestSysProps(Random random) { // We have to disable setting the number of available 
processors as tests in the same JVM randomize processors and will step on each // other if we allow them to set the number of available processors as it's set-once in Netty. System.setProperty("es.set.netty.runtime.available.processors", "false"); - - // sometimes use the java.time date formatters - if (random.nextBoolean()) { - System.setProperty("es.datetime.java_time_parsers", "true"); - } } protected final Logger logger = LogManager.getLogger(getClass()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 5851fc709d14a..6c501898d5fe1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -68,6 +68,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -304,6 +305,10 @@ public static void assertHitCount(SearchRequestBuilder searchRequestBuilder, lon assertResponse(searchRequestBuilder, res -> assertHitCount(res, expectedHitCount)); } + public static void assertHitCount(long expectedHitCount, SearchRequestBuilder... searchRequestBuilders) { + assertResponses(res -> assertHitCount(res, expectedHitCount), searchRequestBuilders); + } + public static void assertHitCount(ActionFuture<SearchResponse> responseFuture, long expectedHitCount) { try { assertResponse(responseFuture, res -> assertHitCount(res, expectedHitCount)); @@ -375,6 +380,37 @@ public static void assertNoFailuresAndResponse(ActionFuture<SearchResponse> resp } } + /** + * Same as {@link #assertResponse(RequestBuilder, Consumer)} but runs the same assertion on multiple requests that are started + * concurrently. + */ + @SafeVarargs + public static <R extends ActionResponse, RequestBuilder extends ActionRequestBuilder<?, R>> void assertResponses( + Consumer<R> consumer, + RequestBuilder... 
searchRequestBuilder ) { + List<Future<R>> futures = new ArrayList<>(searchRequestBuilder.length); + for (RequestBuilder builder : searchRequestBuilder) { + futures.add(builder.execute()); + } + Throwable tr = null; + for (Future<R> f : futures) { + try { + var res = f.get(); + try { + consumer.accept(res); + } finally { + res.decRef(); + } + } catch (Throwable t) { + tr = ExceptionsHelper.useOrSuppress(tr, t); + } + } + if (tr != null) { + throw new AssertionError(tr); + } + } + public static <R extends ActionResponse, RequestBuilder extends ActionRequestBuilder<?, R>> void assertResponse( RequestBuilder searchRequestBuilder, Consumer<R> consumer
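The new assertResponses helper above fans one assertion across several concurrently started requests, collecting failures and suppressing them into a single AssertionError instead of stopping at the first response. A hedged usage sketch (index names are illustrative):

    assertResponses(
        response -> assertHitCount(response, 2L),
        client().prepareSearch("index-a"),
        client().prepareSearch("index-b"),
        client().prepareSearch("index-c")
    );
    // equivalent shorthand via the assertHitCount overload added in the same diff:
    assertHitCount(2L, client().prepareSearch("index-a"), client().prepareSearch("index-b"));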
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 0a3cf6726ea4a..28c9905386091 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -817,7 +817,26 @@ protected Set<String> preserveILMPolicyIds() { ".fleet-file-tohost-meta-ilm-policy", ".deprecation-indexing-ilm-policy", ".monitoring-8-ilm-policy", - "behavioral_analytics-events-default_policy" + "behavioral_analytics-events-default_policy", + "logs-apm.app_logs-default_policy", + "logs-apm.error_logs-default_policy", + "metrics-apm.app_metrics-default_policy", + "metrics-apm.internal_metrics-default_policy", + "metrics-apm.service_destination_10m_metrics-default_policy", + "metrics-apm.service_destination_1m_metrics-default_policy", + "metrics-apm.service_destination_60m_metrics-default_policy", + "metrics-apm.service_summary_10m_metrics-default_policy", + "metrics-apm.service_summary_1m_metrics-default_policy", + "metrics-apm.service_summary_60m_metrics-default_policy", + "metrics-apm.service_transaction_10m_metrics-default_policy", + "metrics-apm.service_transaction_1m_metrics-default_policy", + "metrics-apm.service_transaction_60m_metrics-default_policy", + "metrics-apm.transaction_10m_metrics-default_policy", + "metrics-apm.transaction_1m_metrics-default_policy", + "metrics-apm.transaction_60m_metrics-default_policy", + "traces-apm.rum_traces-default_policy", + "traces-apm.sampled_traces-default_policy", + "traces-apm.traces-default_policy" ); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java index 1aab7f5d6f429..7da0bb247bd8d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java @@ -66,7 +66,7 @@ protected RestController controller() { * Sends the given request to the test controller in {@link #controller()}. */ protected void dispatchRequest(RestRequest request) { - FakeRestChannel channel = new FakeRestChannel(request, false, 1); + FakeRestChannel channel = new FakeRestChannel(request, true, 1); ThreadContext threadContext = verifyingClient.threadPool().getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { controller.dispatchRequest(request, channel, threadContext); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java index 194dfc057b84f..e43aa940a4881 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java @@ -92,14 +92,6 @@ public class RestTestLegacyFeatures implements FeatureSpecification { @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) public static final NodeFeature ML_NLP_SUPPORTED = new NodeFeature("ml.nlp_supported"); - /* - * Starting with 8.11, cluster state has minimum system index mappings versions (#99307) and the system index mappings upgrade service - * started using them to determine when to update mappings for system indices. See https://github.com/elastic/elasticsearch/pull/99668 - */ - public static final NodeFeature MAPPINGS_UPGRADE_SERVICE_USES_MAPPINGS_VERSION = new NodeFeature( - "mappings.upgrade_service_uses_mappings_version" - ); - // YAML public static final NodeFeature REST_ELASTIC_PRODUCT_HEADER_PRESENT = new NodeFeature("action.rest.product_header_present"); @@ -134,8 +126,7 @@ public Map<NodeFeature, Version> getHistoricalFeatures() { entry(DATA_STREAMS_SUPPORTED, Version.V_7_9_0), entry(NEW_DATA_STREAMS_INDEX_NAME_FORMAT, Version.V_7_11_0), entry(DISABLE_FIELD_NAMES_FIELD_REMOVED, Version.V_8_0_0), - entry(ML_NLP_SUPPORTED, Version.V_8_0_0), - entry(MAPPINGS_UPGRADE_SERVICE_USES_MAPPINGS_VERSION, Version.V_8_11_0) + entry(ML_NLP_SUPPORTED, Version.V_8_0_0) ); } } diff --git a/test/immutable-collections-patch/build.gradle b/test/immutable-collections-patch/build.gradle index 28aad96754629..381c0cd6dd044 100644 --- a/test/immutable-collections-patch/build.gradle +++ b/test/immutable-collections-patch/build.gradle @@ -26,14 +26,14 @@ def outputDir = layout.buildDirectory.dir("jdk-patches") def generatePatch = tasks.register("generatePatch", JavaExec) generatePatch.configure { dependsOn tasks.named("compileJava") - inputs.property("java-home-set", BuildParams.getIsRuntimeJavaHomeSet()) - inputs.property("java-version", BuildParams.runtimeJavaVersion) + inputs.property("java-home-set", buildParams.getIsRuntimeJavaHomeSet()) + inputs.property("java-version", buildParams.runtimeJavaVersion) outputs.dir(outputDir) classpath = sourceSets.main.runtimeClasspath mainClass = 'org.elasticsearch.jdk.patch.ImmutableCollectionsPatcher' - if (BuildParams.getIsRuntimeJavaHomeSet()) { - executable = "${BuildParams.runtimeJavaHome}/bin/java" + (OS.current() == OS.WINDOWS ? '.exe' : '') + if (buildParams.getIsRuntimeJavaHomeSet()) { + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" + (OS.current() == OS.WINDOWS ? 
'.exe' : '') } else { javaLauncher = javaToolchains.launcherFor { languageVersion = JavaLanguageVersion.of(VersionProperties.bundledJdkMajorVersion) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java index ec1bf13bd993b..717cf96ad6a92 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java @@ -59,6 +59,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.util.function.Predicate.not; import static org.elasticsearch.test.cluster.local.distribution.DistributionType.DEFAULT; import static org.elasticsearch.test.cluster.util.OS.WINDOWS; @@ -755,18 +756,16 @@ private Map<String, String> getEnvironmentVariables() { } String heapSize = System.getProperty("tests.heap.size", "512m"); - final String esJavaOpts = Stream.of( - "-Xms" + heapSize, - "-Xmx" + heapSize, - "-ea", - "-esa", - System.getProperty("tests.jvm.argline", ""), - featureFlagProperties, - systemProperties, - jvmArgs, - debugArgs - ).filter(s -> s.isEmpty() == false).collect(Collectors.joining(" ")); + List<String> serverOpts = List.of("-Xms" + heapSize, "-Xmx" + heapSize, debugArgs, featureFlagProperties); + List<String> commonOpts = List.of("-ea", "-esa", System.getProperty("tests.jvm.argline", ""), systemProperties, jvmArgs); + + String esJavaOpts = Stream.concat(serverOpts.stream(), commonOpts.stream()) .filter(not(String::isEmpty)) .collect(Collectors.joining(" ")); + String cliJavaOpts = commonOpts.stream().filter(not(String::isEmpty)).collect(Collectors.joining(" ")); + environment.put("ES_JAVA_OPTS", esJavaOpts); + environment.put("CLI_JAVA_OPTS", cliJavaOpts); return environment; } diff --git a/x-pack/libs/es-opensaml-security-api/build.gradle b/x-pack/libs/es-opensaml-security-api/build.gradle index b36d0bfa7b37d..3b4434ec5d9e5 100644 --- a/x-pack/libs/es-opensaml-security-api/build.gradle +++ b/x-pack/libs/es-opensaml-security-api/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' dependencies { implementation "org.opensaml:opensaml-security-api:${versions.opensaml}" diff --git a/x-pack/plugin/analytics/build.gradle b/x-pack/plugin/analytics/build.gradle index c451df58b9fab..ddc075cc9adcc 100644 --- a/x-pack/plugin/analytics/build.gradle +++ b/x-pack/plugin/analytics/build.gradle @@ -21,7 +21,7 @@ dependencies { testImplementation(testArtifact(project(xpackModule('core')))) } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java index 3c7a18de536bc..e684092099948 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; 
import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.AggregatorTestCase; @@ -176,9 +176,9 @@ private List<Document> docs(long startTimestamp, String dim, long... values) thr } private static BytesReference tsid(String dim) throws IOException { - TimeSeriesIdFieldMapper.TimeSeriesIdBuilder idBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); - idBuilder.addString("dim", dim); - return idBuilder.buildTsidHash(); + var routingFields = new RoutingPathFields(null); + routingFields.addString("dim", dim); + return routingFields.buildHash(); } private Document doc(long timestamp, BytesReference tsid, long counterValue, String dim) { diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.app-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.app-fallback@ilm.yaml index 627d6345d6b77..07b1bd9cbcd7e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.app-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.app-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: logs-apm.app_logs-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error-fallback@ilm.yaml index a97c004fa1707..85d8452506493 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: logs-apm.error_logs-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.app-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.app-fallback@ilm.yaml index 23130ef8400c2..9610b38923bbb 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.app-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.app-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.app_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.internal-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.internal-fallback@ilm.yaml index 7fbf7941ea538..625db0ddf063d 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.internal-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.internal-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.internal_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.10m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.10m-fallback@ilm.yaml index a7fe53f56474b..aff33171c4b58 100644 --- 
a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.10m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.10m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_destination_10m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.1m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.1m-fallback@ilm.yaml index 274c8c604582c..46f0e74d66d6c 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.1m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.1m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_destination_1m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.60m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.60m-fallback@ilm.yaml index 2d894dec48ac4..01b5057fb4124 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.60m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.60m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_destination_60m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.10m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.10m-fallback@ilm.yaml index 612bf6ff7c1d0..9a2c8cc4e0f0b 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.10m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.10m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_summary_10m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.1m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.1m-fallback@ilm.yaml index e86eb803de63f..011380ea40c1f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.1m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.1m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_summary_1m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.60m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.60m-fallback@ilm.yaml index 4b4e14eb711e0..32b4840d26a4c 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.60m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.60m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: 
metrics-apm.service_summary_60m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.10m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.10m-fallback@ilm.yaml index fc03e62bcc4cd..80118df29877f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.10m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.10m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_transaction_10m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.1m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.1m-fallback@ilm.yaml index 9021506be3d33..673c17d972c5e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.1m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.1m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_transaction_1m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.60m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.60m-fallback@ilm.yaml index 961b0a35543a7..a04870d4224ca 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.60m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.60m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_transaction_60m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.10m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.10m-fallback@ilm.yaml index e2504def2505c..abadcbf58bd62 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.10m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.10m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.transaction_10m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.1m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.1m-fallback@ilm.yaml index 7bfbcc7bb8052..b8af9a8b96f56 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.1m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.1m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.transaction_1m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.60m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.60m-fallback@ilm.yaml index 
48e6ee5a09c20..3d13284934ade 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.60m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.60m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.transaction_60m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm-fallback@ilm.yaml index 360693e97ae2b..7fc2ca2343ea5 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: traces-apm.traces-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.rum-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.rum-fallback@ilm.yaml index 6dfd79341424f..207307b396dc6 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.rum-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.rum-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: traces-apm.rum_traces-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.sampled-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.sampled-fallback@ilm.yaml index 2193dbf58488b..975e19693b656 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.sampled-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.sampled-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: traces-apm.sampled_traces-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.app_logs-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.app_logs-default_policy.yaml new file mode 100644 index 0000000000000..ab73c1c357897 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.app_logs-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 10d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.error_logs-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.error_logs-default_policy.yaml new file mode 100644 index 0000000000000..ab73c1c357897 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.error_logs-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 10d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.app_metrics-default_policy.yaml 
b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.app_metrics-default_policy.yaml new file mode 100644 index 0000000000000..19fbd66e954cb --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.app_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.internal_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.internal_metrics-default_policy.yaml new file mode 100644 index 0000000000000..19fbd66e954cb --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.internal_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_10m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_10m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..15c067d6720af --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_10m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 14d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 180d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_1m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_1m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..4f618ce4ff51b --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_1m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 7d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_60m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_60m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..277ef59f11300 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_60m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 390d + actions: + delete: {} diff --git 
a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_10m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_10m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..15c067d6720af --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_10m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 14d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 180d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_1m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_1m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..4f618ce4ff51b --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_1m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 7d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_60m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_60m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..277ef59f11300 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_60m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 390d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_10m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_10m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..15c067d6720af --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_10m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 14d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 180d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_1m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_1m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..4f618ce4ff51b --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_1m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 7d + 
max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_60m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_60m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..277ef59f11300 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_60m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 390d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_10m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_10m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..15c067d6720af --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_10m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 14d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 180d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_1m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_1m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..4f618ce4ff51b --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_1m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 7d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_60m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_60m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..277ef59f11300 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_60m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 390d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.rum_traces-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.rum_traces-default_policy.yaml new file mode 100644 index 0000000000000..19fbd66e954cb --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.rum_traces-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 
30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.sampled_traces-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.sampled_traces-default_policy.yaml new file mode 100644 index 0000000000000..2c25f5ec568c6 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.sampled_traces-default_policy.yaml @@ -0,0 +1,13 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 1h + delete: + min_age: 1h + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.traces-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.traces-default_policy.yaml new file mode 100644 index 0000000000000..ab73c1c357897 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.traces-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 10d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index a178b768c4fe9..fa209cdec3695 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates and ingest pipelines installed # by xpack-plugin apm-data. This must be increased whenever an existing template or # pipeline is changed, in order for it to be updated on Elasticsearch upgrade. -version: 10 +version: 11 component-templates: # Data lifecycle. 
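(Editor's aside, not part of the patch: the "version" bump above is what drives re-installation on upgrade. Roughly, the registry compares the bundled version against what the cluster already has and only pushes resources when the bundle is newer. A minimal sketch of that idea with hypothetical helper names, not the actual IndexTemplateRegistry code:)

    int bundledVersion = 11;                                   // the "version" key bumped above
    Integer installedVersion = installedApmResourcesVersion(); // hypothetical lookup; null if nothing installed
    if (installedVersion == null || installedVersion < bundledVersion) {
        installTemplatesPipelinesAndPolicies();                // hypothetical: pushes everything listed in resources.yaml
    }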
@@ -97,3 +97,24 @@ ingest-pipelines: - metrics-apm@pipeline: dependencies: - apm@pipeline + +lifecycle-policies: + - logs-apm.app_logs-default_policy + - logs-apm.error_logs-default_policy + - metrics-apm.app_metrics-default_policy + - metrics-apm.internal_metrics-default_policy + - metrics-apm.service_destination_10m_metrics-default_policy + - metrics-apm.service_destination_1m_metrics-default_policy + - metrics-apm.service_destination_60m_metrics-default_policy + - metrics-apm.service_summary_10m_metrics-default_policy + - metrics-apm.service_summary_1m_metrics-default_policy + - metrics-apm.service_summary_60m_metrics-default_policy + - metrics-apm.service_transaction_10m_metrics-default_policy + - metrics-apm.service_transaction_1m_metrics-default_policy + - metrics-apm.service_transaction_60m_metrics-default_policy + - metrics-apm.transaction_10m_metrics-default_policy + - metrics-apm.transaction_1m_metrics-default_policy + - metrics-apm.transaction_60m_metrics-default_policy + - traces-apm.rum_traces-default_policy + - traces-apm.sampled_traces-default_policy + - traces-apm.traces-default_policy diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java index ff1debdea79b1..4a2b9265b3b05 100644 --- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -44,6 +44,8 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.ilm.OperationMode; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.elasticsearch.xpack.core.template.IngestPipelineConfig; import org.elasticsearch.xpack.stack.StackTemplateRegistry; import org.elasticsearch.xpack.stack.StackTemplateRegistryAccessor; @@ -57,6 +59,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -133,6 +136,7 @@ public void testThatDisablingRegistryDoesNothing() throws Exception { assertThat(apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet(), hasSize(0)); assertThat(apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet(), hasSize(0)); assertThat(apmIndexTemplateRegistry.getIngestPipelines(), hasSize(0)); + assertThat(apmIndexTemplateRegistry.getLifecyclePolicies(), hasSize(0)); client.setVerifier((a, r, l) -> { fail("if the registry is disabled nothing should happen"); @@ -145,6 +149,7 @@ public void testThatDisablingRegistryDoesNothing() throws Exception { assertThat(apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet(), not(hasSize(0))); assertThat(apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet(), not(hasSize(0))); assertThat(apmIndexTemplateRegistry.getIngestPipelines(), not(hasSize(0))); + assertThat(apmIndexTemplateRegistry.getLifecyclePolicies(), not(hasSize(0))); } public void testThatIndependentTemplatesAreAddedImmediatelyIfMissing() throws Exception { @@ -154,23 +159,26 @@ public void testThatIndependentTemplatesAreAddedImmediatelyIfMissing() throws Ex AtomicInteger actualInstalledIndexTemplates 
= new AtomicInteger(0); AtomicInteger actualInstalledComponentTemplates = new AtomicInteger(0); AtomicInteger actualInstalledIngestPipelines = new AtomicInteger(0); + AtomicInteger actualILMPolicies = new AtomicInteger(0); client.setVerifier( (action, request, listener) -> verifyActions( actualInstalledIndexTemplates, actualInstalledComponentTemplates, actualInstalledIngestPipelines, + actualILMPolicies, action, request, listener ) ); - apmIndexTemplateRegistry.clusterChanged(createClusterChangedEvent(Map.of(), Map.of(), nodes)); + apmIndexTemplateRegistry.clusterChanged(createClusterChangedEvent(Map.of(), Map.of(), List.of(), Map.of(), nodes)); assertBusy(() -> assertThat(actualInstalledIngestPipelines.get(), equalTo(getIndependentPipelineConfigs().size()))); assertBusy(() -> assertThat(actualInstalledComponentTemplates.get(), equalTo(getIndependentComponentTemplateConfigs().size()))); + assertBusy(() -> assertThat(actualILMPolicies.get(), equalTo(getIndependentLifecyclePolicies().size()))); - // index templates should not be installed as they are dependent in component templates and ingest pipelines + // index templates should not be installed as they are dependent on component templates and ingest pipelines assertThat(actualInstalledIndexTemplates.get(), equalTo(0)); } @@ -201,6 +209,31 @@ public void testIngestPipelines() throws Exception { }); } + public void testILMLifecyclePolicies() throws Exception { + DiscoveryNode node = DiscoveryNodeUtils.create("node"); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + final List lifecyclePolicies = apmIndexTemplateRegistry.getLifecyclePolicies(); + assertThat(lifecyclePolicies, is(not(empty()))); + + final Set expectedILMPolicies = apmIndexTemplateRegistry.getLifecyclePolicies() + .stream() + .map(LifecyclePolicy::getName) + .collect(Collectors.toSet()); + final Set installedILMPolicies = ConcurrentHashMap.newKeySet(lifecyclePolicies.size()); + client.setVerifier((a, r, l) -> { + if (a == ILMActions.PUT && r instanceof PutLifecycleRequest putLifecycleRequest) { + if (expectedILMPolicies.contains(putLifecycleRequest.getPolicy().getName())) { + installedILMPolicies.add(putLifecycleRequest.getPolicy().getName()); + } + } + return AcknowledgedResponse.TRUE; + }); + + apmIndexTemplateRegistry.clusterChanged(createClusterChangedEvent(Map.of(), Map.of(), List.of(), Map.of(), nodes)); + assertBusy(() -> { assertThat(installedILMPolicies, equalTo(expectedILMPolicies)); }); + } + public void testComponentTemplates() throws Exception { DiscoveryNode node = DiscoveryNodeUtils.create("node"); DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); @@ -208,12 +241,14 @@ public void testComponentTemplates() throws Exception { AtomicInteger actualInstalledIndexTemplates = new AtomicInteger(0); AtomicInteger actualInstalledComponentTemplates = new AtomicInteger(0); AtomicInteger actualInstalledIngestPipelines = new AtomicInteger(0); + AtomicInteger actualILMPolicies = new AtomicInteger(0); client.setVerifier( (action, request, listener) -> verifyActions( actualInstalledIndexTemplates, actualInstalledComponentTemplates, actualInstalledIngestPipelines, + actualILMPolicies, action, request, listener @@ -224,6 +259,9 @@ public void testComponentTemplates() throws Exception { Map.of(), Map.of(), apmIndexTemplateRegistry.getIngestPipelines().stream().map(IngestPipelineConfig::getId).collect(Collectors.toList()), + 
apmIndexTemplateRegistry.getLifecyclePolicies() + .stream() + .collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())), nodes ) ); @@ -237,8 +275,10 @@ public void testComponentTemplates() throws Exception { // ingest pipelines should not have been installed as we used a cluster state that includes them already assertThat(actualInstalledIngestPipelines.get(), equalTo(0)); - // index templates should not be installed as they are dependent in component templates and ingest pipelines + // index templates should not be installed as they are dependent on component templates and ingest pipelines assertThat(actualInstalledIndexTemplates.get(), equalTo(0)); + // ilm policies should not have been installed as we used a cluster state that includes them already + assertThat(actualILMPolicies.get(), equalTo(0)); } public void testIndexTemplates() throws Exception { @@ -248,12 +288,14 @@ public void testIndexTemplates() throws Exception { AtomicInteger actualInstalledIndexTemplates = new AtomicInteger(0); AtomicInteger actualInstalledComponentTemplates = new AtomicInteger(0); AtomicInteger actualInstalledIngestPipelines = new AtomicInteger(0); + AtomicInteger actualILMPolicies = new AtomicInteger(0); client.setVerifier( (action, request, listener) -> verifyActions( actualInstalledIndexTemplates, actualInstalledComponentTemplates, actualInstalledIngestPipelines, + actualILMPolicies, action, request, listener @@ -272,6 +314,9 @@ public void testIndexTemplates() throws Exception { componentTemplates, Map.of(), apmIndexTemplateRegistry.getIngestPipelines().stream().map(IngestPipelineConfig::getId).collect(Collectors.toList()), + apmIndexTemplateRegistry.getLifecyclePolicies() + .stream() + .collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())), nodes ) ); @@ -280,9 +325,11 @@ public void testIndexTemplates() throws Exception { () -> assertThat(actualInstalledIndexTemplates.get(), equalTo(apmIndexTemplateRegistry.getComposableTemplateConfigs().size())) ); - // ingest pipelines and component templates should not have been installed as we used a cluster state that includes them already + // ingest pipelines, component templates, and lifecycle policies should not have been installed as we used a cluster state that + // includes them already assertThat(actualInstalledComponentTemplates.get(), equalTo(0)); assertThat(actualInstalledIngestPipelines.get(), equalTo(0)); + assertThat(actualILMPolicies.get(), equalTo(0)); } public void testIndexTemplateConventions() throws Exception { @@ -408,10 +455,18 @@ private List getIndependentPipelineConfigs() { .collect(Collectors.toList()); } + private Map getIndependentLifecyclePolicies() { + // All lifecycle policies are independent + return apmIndexTemplateRegistry.getLifecyclePolicies() + .stream() + .collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())); + } + private ActionResponse verifyActions( AtomicInteger indexTemplatesCounter, AtomicInteger componentTemplatesCounter, AtomicInteger ingestPipelinesCounter, + AtomicInteger ilmPolicyCounter, ActionType action, ActionRequest request, ActionListener listener @@ -430,6 +485,9 @@ private ActionResponse verifyActions( } else if (action == PutPipelineTransportAction.TYPE) { ingestPipelinesCounter.incrementAndGet(); return AcknowledgedResponse.TRUE; + } else if (action == ILMActions.PUT) { + ilmPolicyCounter.incrementAndGet(); + return AcknowledgedResponse.TRUE; } else { fail("client called with unexpected request:" + request.toString()); return null; diff --git 
a/x-pack/plugin/async-search/qa/rest/build.gradle b/x-pack/plugin/async-search/qa/rest/build.gradle index 4fc557a5b6048..c950646930779 100644 --- a/x-pack/plugin/async-search/qa/rest/build.gradle +++ b/x-pack/plugin/async-search/qa/rest/build.gradle @@ -28,5 +28,5 @@ testClusters.configureEach { // Test clusters run with security disabled tasks.named("yamlRestTest") { - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index a253b6bdd2360..0fb4267745cb8 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -115,24 +115,16 @@ public LongHistogram getCacheMissLoadTimes() { * * @param bytesCopied The number of bytes copied * @param copyTimeNanos The time taken to copy the bytes in nanoseconds - * @param index The index being loaded - * @param shardId The ID of the shard being loaded * @param cachePopulationReason The reason for the cache being populated * @param cachePopulationSource The source from which the data is being loaded */ public void recordCachePopulationMetrics( int bytesCopied, long copyTimeNanos, - String index, - int shardId, CachePopulationReason cachePopulationReason, CachePopulationSource cachePopulationSource ) { Map metricAttributes = Map.of( - INDEX_ATTRIBUTE_KEY, - index, - SHARD_ID_ATTRIBUTE_KEY, - shardId, CACHE_POPULATION_REASON_ATTRIBUTE_KEY, cachePopulationReason.name(), CACHE_POPULATION_SOURCE_ATTRIBUTE_KEY, diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java index ea9d0b7356f0e..435798ba93a8b 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java @@ -30,15 +30,11 @@ public void createMetrics() { public void testRecordCachePopulationMetricsRecordsThroughput() { int mebiBytesSent = randomIntBetween(1, 4); int secondsTaken = randomIntBetween(1, 5); - String indexName = randomIdentifier(); - int shardId = randomIntBetween(0, 10); BlobCacheMetrics.CachePopulationReason cachePopulationReason = randomFrom(BlobCacheMetrics.CachePopulationReason.values()); CachePopulationSource cachePopulationSource = randomFrom(CachePopulationSource.values()); metrics.recordCachePopulationMetrics( Math.toIntExact(ByteSizeValue.ofMb(mebiBytesSent).getBytes()), TimeUnit.SECONDS.toNanos(secondsTaken), - indexName, - shardId, cachePopulationReason, cachePopulationSource ); @@ -48,32 +44,28 @@ public void testRecordCachePopulationMetricsRecordsThroughput() { .getMeasurements(InstrumentType.DOUBLE_HISTOGRAM, "es.blob_cache.population.throughput.histogram") .get(0); assertEquals(throughputMeasurement.getDouble(), (double) mebiBytesSent / secondsTaken, 0.0); - assertExpectedAttributesPresent(throughputMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + assertExpectedAttributesPresent(throughputMeasurement, cachePopulationReason, cachePopulationSource); // bytes counter Measurement totalBytesMeasurement = recordingMeterRegistry.getRecorder() .getMeasurements(InstrumentType.LONG_COUNTER, 
"es.blob_cache.population.bytes.total") .get(0); assertEquals(totalBytesMeasurement.getLong(), ByteSizeValue.ofMb(mebiBytesSent).getBytes()); - assertExpectedAttributesPresent(totalBytesMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + assertExpectedAttributesPresent(totalBytesMeasurement, cachePopulationReason, cachePopulationSource); // time counter Measurement totalTimeMeasurement = recordingMeterRegistry.getRecorder() .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.population.time.total") .get(0); assertEquals(totalTimeMeasurement.getLong(), TimeUnit.SECONDS.toMillis(secondsTaken)); - assertExpectedAttributesPresent(totalTimeMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + assertExpectedAttributesPresent(totalTimeMeasurement, cachePopulationReason, cachePopulationSource); } private static void assertExpectedAttributesPresent( Measurement measurement, - int shardId, - String indexName, BlobCacheMetrics.CachePopulationReason cachePopulationReason, CachePopulationSource cachePopulationSource ) { - assertEquals(measurement.attributes().get(BlobCacheMetrics.SHARD_ID_ATTRIBUTE_KEY), shardId); - assertEquals(measurement.attributes().get(BlobCacheMetrics.INDEX_ATTRIBUTE_KEY), indexName); assertEquals(measurement.attributes().get(BlobCacheMetrics.CACHE_POPULATION_REASON_ATTRIBUTE_KEY), cachePopulationReason.name()); assertEquals(measurement.attributes().get(BlobCacheMetrics.CACHE_POPULATION_SOURCE_ATTRIBUTE_KEY), cachePopulationSource.name()); } diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 193a82436f26a..e25d7fb359acb 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -37,7 +37,7 @@ artifacts { def restTestBlacklist = [] // TODO: fix this rest test to not depend on a hardcoded port! restTestBlacklist.addAll(['getting_started/10_monitor_cluster_health/*']) -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { // these tests attempt to install basic/internal licenses signed against the dev/public.key // Since there is no infrastructure in place (anytime soon) to generate licenses using the production // private key, these tests are blacklisted in non-snapshot test runs diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle index 583ad5d8c3df3..4be504e616920 100644 --- a/x-pack/plugin/ccr/qa/build.gradle +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -10,6 +10,6 @@ subprojects { tasks.withType(Test).configureEach { // These fail in CI but only when run as part of checkPart2 and not individually. 
// Tracked in : https://github.com/elastic/elasticsearch/issues/66661 - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } } diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index fb4acb0055a8c..b4f17cb436df5 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -94,7 +94,7 @@ tasks.named("processResources").configure { String licenseKey = providers.systemProperty("license.key").getOrNull() if (licenseKey != null) { println "Using provided license key from ${licenseKey}" - } else if (BuildParams.isSnapshotBuild()) { + } else if (buildParams.isSnapshotBuild()) { licenseKey = Paths.get(project.projectDir.path, 'snapshot.key') } else { throw new IllegalArgumentException('Property license.key must be set for release build') @@ -155,13 +155,13 @@ testClusters.configureEach { requiresFeature 'es.failure_store_feature_flag_enabled', Version.fromString("8.15.0") } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.withType(Test).configureEach { systemProperty 'es.failure_store_feature_flag_enabled', 'true' } } -if (BuildParams.inFipsJvm) { +if (buildParams.inFipsJvm) { // Test clusters run with security disabled tasks.named("javaRestTest").configure { enabled = false } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java index b1dac4898945d..a054a18221e9b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java @@ -34,6 +34,7 @@ public class EnterpriseSearchFeatureSetUsage extends XPackFeatureUsage { public static final String MIN_RULE_COUNT = "min_rule_count"; public static final String MAX_RULE_COUNT = "max_rule_count"; public static final String RULE_CRITERIA_TOTAL_COUNTS = "rule_criteria_total_counts"; + public static final String RULE_TYPE_TOTAL_COUNTS = "rule_type_total_counts"; private final Map searchApplicationsUsage; private final Map analyticsCollectionsUsage; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/RemainingTime.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/RemainingTime.java index 33a3f2424c90c..4772277ae2375 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/RemainingTime.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/RemainingTime.java @@ -18,8 +18,13 @@ public interface RemainingTime extends Supplier { * Create a {@link Supplier} that returns a decreasing {@link TimeValue} on each invocation, representing the amount of time until * the call times out. The timer starts when this method is called and counts down from remainingTime to 0. * currentTime should return the most up-to-date system time, for example Instant.now() or Clock.instant(). + * {@link TimeValue#MAX_VALUE} is a special case where the remaining time is always TimeValue.MAX_VALUE. 
*/ static RemainingTime from(Supplier<Instant> currentTime, TimeValue remainingTime) { + if (remainingTime.equals(TimeValue.MAX_VALUE)) { + return () -> TimeValue.MAX_VALUE; + } + var timeout = currentTime.get().plus(remainingTime.duration(), remainingTime.timeUnit().toChronoUnit()); var maxRemainingTime = remainingTime.nanos(); return () -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java index 4fb94dce1dcd0..8fe8c8835b98d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java @@ -23,6 +23,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.template.resources.TemplateResources; +import java.io.IOException; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -48,19 +49,32 @@ public static LifecyclePolicy loadPolicy( source = replaceVariables(source, variables); validate(source); - try ( - XContentParser parser = XContentType.JSON.xContent() - .createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry), source) - ) { - LifecyclePolicy policy = LifecyclePolicy.parse(parser, name); - policy.validate(); - return policy; - } + return parsePolicy(source, name, xContentRegistry, XContentType.JSON); } catch (Exception e) { throw new IllegalArgumentException("unable to load policy [" + name + "] from [" + resource + "]", e); } } + /** + * Parses a lifecycle policy based on the provided content type, without doing any variable substitution. + * It is the caller's responsibility to do any variable substitution if required.
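(Editor's aside, not part of the patch: a minimal usage sketch of the RemainingTime change above; the timeout value is illustrative. Assumes java.time.Instant, java.util.function.Supplier, and org.elasticsearch.core.TimeValue are imported:)

    Supplier<Instant> clock = Instant::now;

    // A finite timeout counts down towards zero on each invocation.
    RemainingTime finite = RemainingTime.from(clock, TimeValue.timeValueSeconds(30));
    TimeValue left = finite.get(); // at most 30s, smaller on every call

    // TimeValue.MAX_VALUE now short-circuits before any Instant arithmetic:
    // every invocation simply returns TimeValue.MAX_VALUE.
    RemainingTime infinite = RemainingTime.from(clock, TimeValue.MAX_VALUE);
    assert infinite.get().equals(TimeValue.MAX_VALUE);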
+ */ + public static LifecyclePolicy parsePolicy( + String rawPolicy, + String name, + NamedXContentRegistry xContentRegistry, + XContentType contentType + ) throws IOException { + try ( + XContentParser parser = contentType.xContent() + .createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry), rawPolicy) + ) { + LifecyclePolicy policy = LifecyclePolicy.parse(parser, name); + policy.validate(); + return policy; + } + } + private static String replaceVariables(String template, Map variables) { for (Map.Entry variable : variables.entrySet()) { template = replaceVariable(template, variable.getKey(), variable.getValue()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java index c8ddd46c5912f..a30236b2fef28 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; @@ -22,7 +23,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.yaml.YamlXContent; +import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; +import org.elasticsearch.xpack.core.ilm.LifecyclePolicyUtils; import java.io.IOException; import java.util.Collections; @@ -48,6 +52,7 @@ public abstract class YamlTemplateRegistry extends IndexTemplateRegistry { private final Map componentTemplates; private final Map composableIndexTemplates; private final List ingestPipelines; + private final List lifecyclePolicies; private final FeatureService featureService; private volatile boolean enabled; @@ -84,6 +89,7 @@ public YamlTemplateRegistry( final List componentTemplateNames = (List) resources.get("component-templates"); final List indexTemplateNames = (List) resources.get("index-templates"); final List ingestPipelineConfigs = (List) resources.get("ingest-pipelines"); + final List lifecyclePolicyConfigs = (List) resources.get("lifecycle-policies"); componentTemplates = Optional.ofNullable(componentTemplateNames) .orElse(Collections.emptyList()) @@ -110,9 +116,16 @@ public YamlTemplateRegistry( ); }) .collect(Collectors.toList()); + lifecyclePolicies = Optional.ofNullable(lifecyclePolicyConfigs) + .orElse(Collections.emptyList()) + .stream() + .map(o -> (String) o) + .filter(templateFilter) + .map(this::loadLifecyclePolicy) + .collect(Collectors.toList()); this.featureService = featureService; } catch (IOException e) { - throw new RuntimeException(e); + throw new ElasticsearchException(e); } } @@ -178,6 +191,15 @@ public List getIngestPipelines() { } } + @Override + public List getLifecyclePolicies() { + if (enabled) { + return lifecyclePolicies; + } else { + return Collections.emptyList(); + } + } + protected abstract String getVersionProperty(); private ComponentTemplate loadComponentTemplate(String name, int version) { @@ -192,7 +214,7 @@ private ComponentTemplate loadComponentTemplate(String name, int 
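(Editor's aside, not part of the patch: the new parsePolicy helper above ties together with the YAML policies added earlier in this diff. A sketch using loadResource the way loadLifecyclePolicy does below; the chosen resource name is one of the files from this patch, and the surrounding scaffolding is assumed:)

    String raw = loadResource(YamlTemplateRegistry.class, "/lifecycle-policies/traces-apm.traces-default_policy.yaml");
    LifecyclePolicy policy = LifecyclePolicyUtils.parsePolicy(
        raw,
        "traces-apm.traces-default_policy",
        LifecyclePolicyConfig.DEFAULT_X_CONTENT_REGISTRY,
        XContentType.YAML
    );
    // parsePolicy runs policy.validate() before returning, so a malformed policy fails here.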
version) { return ComponentTemplate.parse(parser); } } catch (Exception e) { - throw new RuntimeException("failed to load " + getName() + " Ingest plugin's component template: " + name, e); + throw new ElasticsearchException("failed to load " + getName() + " Ingest plugin's component template: " + name, e); } } @@ -208,7 +230,7 @@ private ComposableIndexTemplate loadIndexTemplate(String name, int version) { return ComposableIndexTemplate.parse(parser); } } catch (Exception e) { - throw new RuntimeException("failed to load " + getName() + " Ingest plugin's index template: " + name, e); + throw new ElasticsearchException("failed to load " + getName() + " Ingest plugin's index template: " + name, e); } } @@ -226,6 +248,17 @@ private IngestPipelineConfig loadIngestPipeline(String name, int version, @Nulla ); } + // IndexTemplateRegistry ensures that ILM lifecycle policies are not loaded + // when in DSL only mode. + private LifecyclePolicy loadLifecyclePolicy(String name) { + try { + var rawPolicy = loadResource(this.getClass(), "/lifecycle-policies/" + name + ".yaml"); + return LifecyclePolicyUtils.parsePolicy(rawPolicy, name, LifecyclePolicyConfig.DEFAULT_X_CONTENT_REGISTRY, XContentType.YAML); + } catch (IOException e) { + throw new ElasticsearchException(e); + } + } + @Override protected boolean applyRolloverAfterTemplateV2Update() { return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java index 7b0bd8a8108e9..815f6f0741440 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.watcher.transport.actions.put; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ValidateActions; @@ -17,7 +16,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import java.io.IOException; import java.util.Map; @@ -56,30 +54,14 @@ public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, Map times(Instant... 
instants) { var startTime = Stream.of(Instant.now()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java index 01a12fb795316..21e3155501995 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java @@ -189,7 +189,10 @@ public void testNoShardStats() { shardStats[0] = sStats; mockXPackInfo(true, true); - mockIndexStatsCall(indexName, new IndexStats(indexName, "uuid", ClusterHealthStatus.GREEN, IndexMetadata.State.OPEN, shardStats)); + mockIndexStatsCall( + indexName, + new IndexStats(indexName, "uuid", ClusterHealthStatus.GREEN, IndexMetadata.State.OPEN, null, null, shardStats) + ); final SetOnce conditionMetHolder = new SetOnce<>(); final SetOnce stepInfoHolder = new SetOnce<>(); @@ -289,7 +292,7 @@ private IndexStats randomIndexStats(boolean isLeaderIndex, int numOfShards) { for (int i = 0; i < numOfShards; i++) { shardStats[i] = randomShardStats(isLeaderIndex); } - return new IndexStats(randomAlphaOfLength(5), randomAlphaOfLength(10), null, null, shardStats); + return new IndexStats(randomAlphaOfLength(5), randomAlphaOfLength(10), null, null, null, null, shardStats); } private ShardStats randomShardStats(boolean isLeaderIndex) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java index 6f6224d505327..25d9509ecdc7a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java @@ -10,14 +10,15 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; +import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasSize; public class MlRestTestStateCleaner { @@ -30,24 +31,29 @@ public MlRestTestStateCleaner(Logger logger, RestClient adminClient) { } public void resetFeatures() throws IOException { - waitForMlStatsIndexToInitialize(); - deleteAllTrainedModelIngestPipelines(); + deletePipelinesWithInferenceProcessors(); // This resets all features, not just ML, but they should have been getting reset between tests anyway so it shouldn't matter adminClient.performRequest(new Request("POST", "/_features/_reset")); } @SuppressWarnings("unchecked") - private void deleteAllTrainedModelIngestPipelines() throws IOException { - final Request getAllTrainedModelStats = new Request("GET", "/_ml/trained_models/_stats"); - getAllTrainedModelStats.addParameter("size", "10000"); - final Response trainedModelsStatsResponse = adminClient.performRequest(getAllTrainedModelStats); + private void deletePipelinesWithInferenceProcessors() throws IOException { + final Response pipelinesResponse = adminClient.performRequest(new Request("GET", "/_ingest/pipeline")); + final Map pipelines = 
ESRestTestCase.entityAsMap(pipelinesResponse); + + var pipelinesWithInferenceProcessors = new HashSet(); + for (var entry : pipelines.entrySet()) { + var pipelineDef = (Map) entry.getValue(); // each top level object is a separate pipeline + var processors = (List>) pipelineDef.get("processors"); + for (var processor : processors) { + assertThat(processor.entrySet(), hasSize(1)); + if ("inference".equals(processor.keySet().iterator().next())) { + pipelinesWithInferenceProcessors.add(entry.getKey()); + } + } + } - final List> pipelines = (List>) XContentMapValues.extractValue( - "trained_model_stats.ingest.pipelines", - ESRestTestCase.entityAsMap(trainedModelsStatsResponse) - ); - Set pipelineIds = pipelines.stream().flatMap(m -> m.keySet().stream()).collect(Collectors.toSet()); - for (String pipelineId : pipelineIds) { + for (String pipelineId : pipelinesWithInferenceProcessors) { try { adminClient.performRequest(new Request("DELETE", "/_ingest/pipeline/" + pipelineId)); } catch (Exception ex) { @@ -55,12 +61,4 @@ private void deleteAllTrainedModelIngestPipelines() throws IOException { } } } - - private void waitForMlStatsIndexToInitialize() throws IOException { - ESRestTestCase.ensureHealth(adminClient, ".ml-stats-*", (request) -> { - request.addParameter("wait_for_no_initializing_shards", "true"); - request.addParameter("level", "shards"); - request.addParameter("timeout", "30s"); - }); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java index 2c31965009273..a39aff3a6137f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java @@ -131,7 +131,9 @@ public void testCollapseAndRemoveUnsupportedPrivileges() { // create random groups with random privileges for random clusters List randomGroups = generateRandomGroups(true); // replace a random value with one that is allowed - String singleValidPrivilege = randomFrom(RemoteClusterPermissions.allowedRemoteClusterPermissions.get(TransportVersion.current())); + String singleValidPrivilege = randomFrom( + RemoteClusterPermissions.allowedRemoteClusterPermissions.get(lastTransportVersionPermission) + ); groupPrivileges.get(0)[0] = singleValidPrivilege; for (int i = 0; i < randomGroups.size(); i++) { diff --git a/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle b/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle index 2d8859bdcea3d..a9580f4e14d6b 100644 --- a/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle +++ b/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle @@ -37,6 +37,6 @@ testClusters.configureEach { // Test clusters run with security disabled tasks.named("javaRestTest") { - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/deprecation/qa/rest/build.gradle b/x-pack/plugin/deprecation/qa/rest/build.gradle index 70c0cadbce375..9a8b228763fe0 100644 --- a/x-pack/plugin/deprecation/qa/rest/build.gradle +++ b/x-pack/plugin/deprecation/qa/rest/build.gradle @@ -34,5 +34,5 @@ testClusters.configureEach { // Test clusters run with security disabled tasks.named("javaRestTest") { - BuildParams.withFipsEnabledOnly(it) + 
buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index c80f26cda7b36..d13f3cda2a82c 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -90,7 +90,7 @@ private DeprecationChecks() {} NodeDeprecationChecks::checkWatcherBulkConcurrentRequestsSetting ); - static List> INDEX_SETTINGS_CHECKS = List.of( + static List> INDEX_SETTINGS_CHECKS = List.of( IndexDeprecationChecks::oldIndicesCheck, IndexDeprecationChecks::translogRetentionSettingCheck, IndexDeprecationChecks::checkIndexDataPath, diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java index cd26e23394e81..87d0bfb93e18c 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java @@ -274,7 +274,7 @@ public static DeprecationInfoAction.Response from( IndexNameExpressionResolver indexNameExpressionResolver, Request request, NodesDeprecationCheckResponse nodeDeprecationResponse, - List> indexSettingsChecks, + List> indexSettingsChecks, List> dataStreamChecks, List> clusterSettingsChecks, Map> pluginSettingIssues, @@ -293,7 +293,10 @@ public static DeprecationInfoAction.Response from( Map> indexSettingsIssues = new HashMap<>(); for (String concreteIndex : concreteIndexNames) { IndexMetadata indexMetadata = stateWithSkippedSettingsRemoved.getMetadata().index(concreteIndex); - List singleIndexIssues = filterChecks(indexSettingsChecks, c -> c.apply(indexMetadata)); + List singleIndexIssues = filterChecks( + indexSettingsChecks, + c -> c.apply(indexMetadata, stateWithSkippedSettingsRemoved) + ); if (singleIndexIssues.size() > 0) { indexSettingsIssues.put(concreteIndex, singleIndexIssues); } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index 3da32c7f5a4c2..8144d960df2e8 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.deprecation; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.time.DateFormatter; @@ -30,14 +31,15 @@ */ public class IndexDeprecationChecks { - static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata) { + static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata, ClusterState clusterState) { // TODO: this check needs to be revised. It's trivially true right now. 
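// Sketch of how the reworked checks are driven (filterChecks and INDEX_SETTINGS_CHECKS
// are the members shown in the hunks above; indexMetadata and clusterState stand for the
// caller's values). Each index check is now a
// BiFunction<IndexMetadata, ClusterState, DeprecationIssue> instead of a plain
// Function<IndexMetadata, DeprecationIssue>, which is what lets oldIndicesCheck consult
// cluster-wide metadata such as data stream membership:
List<DeprecationIssue> issues = DeprecationChecks.filterChecks(
    DeprecationChecks.INDEX_SETTINGS_CHECKS,
    c -> c.apply(indexMetadata, clusterState)
);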
IndexVersion currentCompatibilityVersion = indexMetadata.getCompatibilityVersion(); - if (currentCompatibilityVersion.before(IndexVersions.V_7_0_0)) { + // We intentionally exclude indices that are in data streams because they will be picked up by DataStreamDeprecationChecks + if (currentCompatibilityVersion.before(IndexVersions.V_8_0_0) && isNotDataStreamIndex(indexMetadata, clusterState)) { return new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old index with a compatibility version < 7.0", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + "breaking-changes-8.0.html", + "Old index with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", "This index has version: " + currentCompatibilityVersion.toReleaseVersion(), false, null @@ -46,7 +48,11 @@ static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata) { return null; } - static DeprecationIssue translogRetentionSettingCheck(IndexMetadata indexMetadata) { + private static boolean isNotDataStreamIndex(IndexMetadata indexMetadata, ClusterState clusterState) { + return clusterState.metadata().findDataStreams(indexMetadata.getIndex().getName()).isEmpty(); + } + + static DeprecationIssue translogRetentionSettingCheck(IndexMetadata indexMetadata, ClusterState clusterState) { final boolean softDeletesEnabled = IndexSettings.INDEX_SOFT_DELETES_SETTING.get(indexMetadata.getSettings()); if (softDeletesEnabled) { if (IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(indexMetadata.getSettings()) @@ -73,7 +79,7 @@ static DeprecationIssue translogRetentionSettingCheck(IndexMetadata indexMetadat return null; } - static DeprecationIssue checkIndexDataPath(IndexMetadata indexMetadata) { + static DeprecationIssue checkIndexDataPath(IndexMetadata indexMetadata, ClusterState clusterState) { if (IndexMetadata.INDEX_DATA_PATH_SETTING.exists(indexMetadata.getSettings())) { final String message = String.format( Locale.ROOT, @@ -88,7 +94,7 @@ static DeprecationIssue checkIndexDataPath(IndexMetadata indexMetadata) { return null; } - static DeprecationIssue storeTypeSettingCheck(IndexMetadata indexMetadata) { + static DeprecationIssue storeTypeSettingCheck(IndexMetadata indexMetadata, ClusterState clusterState) { final String storeType = IndexModule.INDEX_STORE_TYPE_SETTING.get(indexMetadata.getSettings()); if (IndexModule.Type.SIMPLEFS.match(storeType)) { return new DeprecationIssue( @@ -105,7 +111,7 @@ static DeprecationIssue storeTypeSettingCheck(IndexMetadata indexMetadata) { return null; } - static DeprecationIssue frozenIndexSettingCheck(IndexMetadata indexMetadata) { + static DeprecationIssue frozenIndexSettingCheck(IndexMetadata indexMetadata, ClusterState clusterState) { Boolean isIndexFrozen = FrozenEngine.INDEX_FROZEN.get(indexMetadata.getSettings()); if (Boolean.TRUE.equals(isIndexFrozen)) { String indexName = indexMetadata.getIndex().getName(); @@ -195,7 +201,7 @@ static List findInPropertiesRecursively( return issues; } - static DeprecationIssue deprecatedCamelCasePattern(IndexMetadata indexMetadata) { + static DeprecationIssue deprecatedCamelCasePattern(IndexMetadata indexMetadata, ClusterState clusterState) { List fields = new ArrayList<>(); fieldLevelMappingIssue( indexMetadata, diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java index 
5750daa8e3673..67950f3b9f623 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java @@ -117,7 +117,9 @@ public void testFrom() throws IOException { boolean dataStreamIssueFound = randomBoolean(); DeprecationIssue foundIssue = createTestDeprecationIssue(); List> clusterSettingsChecks = List.of((s) -> clusterIssueFound ? foundIssue : null); - List> indexSettingsChecks = List.of((idx) -> indexIssueFound ? foundIssue : null); + List> indexSettingsChecks = List.of( + (idx, cs) -> indexIssueFound ? foundIssue : null + ); List> dataStreamChecks = List.of( (ds, cs) -> dataStreamIssueFound ? foundIssue : null ); @@ -211,7 +213,7 @@ public void testFromWithMergeableNodeIssues() throws IOException { DeprecationIssue foundIssue1 = createTestDeprecationIssue(metaMap1); DeprecationIssue foundIssue2 = createTestDeprecationIssue(foundIssue1, metaMap2); List> clusterSettingsChecks = Collections.emptyList(); - List> indexSettingsChecks = List.of((idx) -> null); + List> indexSettingsChecks = List.of((idx, cs) -> null); List> dataStreamChecks = List.of((ds, cs) -> null); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( @@ -276,10 +278,12 @@ public void testRemoveSkippedSettings() throws IOException { return null; })); AtomicReference visibleIndexSettings = new AtomicReference<>(); - List> indexSettingsChecks = Collections.unmodifiableList(Arrays.asList((idx) -> { - visibleIndexSettings.set(idx.getSettings()); - return null; - })); + List> indexSettingsChecks = Collections.unmodifiableList( + Arrays.asList((idx, cs) -> { + visibleIndexSettings.set(idx.getSettings()); + return null; + }) + ); AtomicInteger backingIndicesCount = new AtomicInteger(0); List> dataStreamChecks = Collections.unmodifiableList( Arrays.asList((ds, cs) -> { diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 18872d00d54a0..48cbef6831a2b 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -7,8 +7,15 @@ package org.elasticsearch.xpack.deprecation; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamMetadata; +import org.elasticsearch.cluster.metadata.DataStreamOptions; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; @@ -19,39 +26,89 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static 
org.hamcrest.collection.IsIterableContainingInOrder.contains; public class IndexDeprecationChecksTests extends ESTestCase { public void testOldIndicesCheck() { - IndexVersion createdWith = IndexVersion.fromId(1000099); + IndexVersion createdWith = IndexVersion.fromId(7170099); IndexMetadata indexMetadata = IndexMetadata.builder("test") .settings(settings(createdWith)) .numberOfShards(1) .numberOfReplicas(0) .build(); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(indexMetadata, true)) + .build(); DeprecationIssue expected = new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old index with a compatibility version < 7.0", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + "breaking-changes-8.0.html", + "Old index with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", "This index has version: " + createdWith.toReleaseVersion(), false, null ); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata, clusterState)); assertEquals(singletonList(expected), issues); } + public void testOldIndicesCheckDataStreamIndex() { + IndexVersion createdWith = IndexVersion.fromId(7170099); + IndexMetadata indexMetadata = IndexMetadata.builder(".ds-test") + .settings(settings(createdWith).put("index.hidden", true)) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + DataStream dataStream = new DataStream( + randomAlphaOfLength(10), + List.of(indexMetadata.getIndex()), + randomNegativeLong(), + Map.of(), + randomBoolean(), + false, + false, + randomBoolean(), + randomFrom(IndexMode.values()), + null, + randomFrom(DataStreamOptions.EMPTY, DataStreamOptions.FAILURE_STORE_DISABLED, DataStreamOptions.FAILURE_STORE_ENABLED, null), + List.of(), + randomBoolean(), + null + ); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata( + Metadata.builder() + .put(indexMetadata, true) + .customs( + Map.of( + DataStreamMetadata.TYPE, + new DataStreamMetadata( + ImmutableOpenMap.builder(Map.of("my-data-stream", dataStream)).build(), + ImmutableOpenMap.of() + ) + ) + ) + ) + .build(); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata, clusterState)); + assertThat(issues.size(), equalTo(0)); + } + public void testTranslogRetentionSettings() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat( issues, contains( @@ -81,7 +138,10 @@ public void testDefaultTranslogRetentionSettings() { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); } IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> 
c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat(issues, empty()); } @@ -89,7 +149,10 @@ public void testIndexDataPathSetting() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexMetadata.INDEX_DATA_PATH_SETTING.getKey(), createTempDir()); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); final String expectedUrl = "https://www.elastic.co/guide/en/elasticsearch/reference/7.13/breaking-changes-7.13.html#deprecate-shared-data-path-setting"; assertThat( @@ -111,7 +174,10 @@ public void testSimpleFSSetting() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "simplefs"); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat( issues, contains( @@ -133,7 +199,10 @@ public void testFrozenIndex() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(FrozenEngine.INDEX_FROZEN.getKey(), true); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat( issues, contains( @@ -175,7 +244,10 @@ public void testCamelCaseDeprecation() throws IOException { false, null ); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(simpleIndex)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(simpleIndex, ClusterState.EMPTY_STATE) + ); assertThat(issues, hasItem(expected)); } } diff --git a/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle b/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle index 6b1c7e42c0fde..c4f2a239d48e2 100644 --- a/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle +++ b/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle @@ -29,7 +29,7 @@ def supportedVersion = bwcVersion -> { return bwcVersion.onOrAfter("8.10.0") && bwcVersion != VersionProperties.elasticsearchVersion } -BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> def yamlRestTest = tasks.register("v${bwcVersion}#yamlRestTest", StandaloneRestIntegTestTask) { usesDefaultDistribution() diff --git a/x-pack/plugin/downsample/qa/rest/build.gradle b/x-pack/plugin/downsample/qa/rest/build.gradle index ba5ac7b0c7317..c5cfbea000ebe 100644 --- a/x-pack/plugin/downsample/qa/rest/build.gradle +++ b/x-pack/plugin/downsample/qa/rest/build.gradle @@ -32,7 +32,7 @@ tasks.named('yamlRestTest') { tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } -if 
(BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/downsample/qa/with-security/build.gradle b/x-pack/plugin/downsample/qa/with-security/build.gradle index 5eed735950187..849c242f372bd 100644 --- a/x-pack/plugin/downsample/qa/with-security/build.gradle +++ b/x-pack/plugin/downsample/qa/with-security/build.gradle @@ -28,7 +28,7 @@ testClusters.configureEach { user username: 'elastic_admin', password: 'admin-password' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/enrich/qa/rest-with-security/build.gradle b/x-pack/plugin/enrich/qa/rest-with-security/build.gradle index 69fec4ad32c74..844cfcc61adff 100644 --- a/x-pack/plugin/enrich/qa/rest-with-security/build.gradle +++ b/x-pack/plugin/enrich/qa/rest-with-security/build.gradle @@ -6,7 +6,7 @@ dependencies { javaRestTestImplementation project(path: xpackModule('core')) javaRestTestImplementation project(path: xpackModule('enrich:qa:common')) } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/enrich/qa/rest/build.gradle b/x-pack/plugin/enrich/qa/rest/build.gradle index f96eff5f933c4..637ab21a98fd7 100644 --- a/x-pack/plugin/enrich/qa/rest/build.gradle +++ b/x-pack/plugin/enrich/qa/rest/build.gradle @@ -19,7 +19,7 @@ dependencies { javaRestTestImplementation project(path: xpackModule('enrich:qa:common')) } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } tasks.named("yamlRestTest").configure{enabled = false } diff --git a/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle b/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle index e84adf0c0325d..47a1ffaa37fa4 100644 --- a/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle +++ b/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle @@ -17,7 +17,7 @@ dependencies { javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"), "javaRestTest")) } -BuildParams.bwcVersions.withWireCompatible(v -> v.after("8.8.0")) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(v -> v.after("8.8.0")) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml index 172d38cce5384..0b98182b39602 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: [ "gte_v8.10.0" ] - reason: Introduced in 8.10.0 - do: 
query_rules.put_ruleset: ruleset_id: test-query-ruleset-3 @@ -222,7 +219,7 @@ teardown: body: rules: - rule_id: query-rule-id1 - type: pinned + type: exclude criteria: - type: exact metadata: query_string @@ -307,3 +304,89 @@ teardown: - match: { error.type: 'security_exception' } +--- +'List query rulesets - include rule types': + - requires: + cluster_features: [ "query_rule_list_types" ] + reason: 'List responses updated in 8.15.5 and 8.16.1' + + - do: + query_rules.put_ruleset: + ruleset_id: a-test-query-ruleset-with-lots-of-criteria + body: + rules: + - rule_id: query-rule-id1 + type: exclude + criteria: + - type: exact + metadata: query_string + values: [ puggles ] + - type: gt + metadata: year + values: [ 2023 ] + actions: + ids: + - 'id1' + - 'id2' + - rule_id: query-rule-id2 + type: pinned + criteria: + - type: exact + metadata: query_string + values: [ pug ] + actions: + ids: + - 'id3' + - 'id4' + - rule_id: query-rule-id3 + type: pinned + criteria: + - type: fuzzy + metadata: query_string + values: [ puggles ] + actions: + ids: + - 'id5' + - 'id6' + - rule_id: query-rule-id4 + type: pinned + criteria: + - type: always + actions: + ids: + - 'id7' + - 'id8' + - rule_id: query-rule-id5 + type: pinned + criteria: + - type: prefix + metadata: query_string + values: [ pug ] + - type: suffix + metadata: query_string + values: [ gle ] + actions: + ids: + - 'id9' + - 'id10' + + - do: + query_rules.list_rulesets: + from: 0 + size: 1 + + - match: { count: 4 } + + # Alphabetical order by ruleset_id for results + - match: { results.0.ruleset_id: "a-test-query-ruleset-with-lots-of-criteria" } + - match: { results.0.rule_total_count: 5 } + - match: + results.0.rule_criteria_types_counts: + exact: 2 + gt: 1 + fuzzy: 1 + prefix: 1 + suffix: 1 + always: 1 + - match: { results.0.rule_type_counts: { pinned: 4, exclude: 1 } } + diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java index ae8e63bdb6420..86882a28ec39f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java @@ -12,6 +12,7 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.application.analytics.AnalyticsTemplateRegistry; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; +import org.elasticsearch.xpack.application.rules.action.ListQueryRulesetsAction; import org.elasticsearch.xpack.application.rules.retriever.QueryRuleRetrieverBuilder; import java.util.Map; @@ -23,7 +24,11 @@ public class EnterpriseSearchFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(QUERY_RULES_TEST_API, QueryRuleRetrieverBuilder.QUERY_RULE_RETRIEVERS_SUPPORTED); + return Set.of( + QUERY_RULES_TEST_API, + QueryRuleRetrieverBuilder.QUERY_RULE_RETRIEVERS_SUPPORTED, + ListQueryRulesetsAction.QUERY_RULE_LIST_TYPES + ); } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java index c079892ccb2b6..7683ea7cb28a7 100644 --- 
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.application.analytics.action.GetAnalyticsCollectionAction; -import org.elasticsearch.xpack.application.rules.QueryRuleCriteriaType; import org.elasticsearch.xpack.application.rules.QueryRulesIndexService; import org.elasticsearch.xpack.application.rules.QueryRulesetListItem; import org.elasticsearch.xpack.application.rules.action.ListQueryRulesetsAction; @@ -41,7 +40,6 @@ import org.elasticsearch.xpack.core.application.EnterpriseSearchFeatureSetUsage; import java.util.Collections; -import java.util.EnumMap; import java.util.HashMap; import java.util.IntSummaryStatistics; import java.util.List; @@ -226,20 +224,29 @@ private void addQueryRulesetUsage(ListQueryRulesetsAction.Response response, Map List results = response.queryPage().results(); IntSummaryStatistics ruleStats = results.stream().mapToInt(QueryRulesetListItem::ruleTotalCount).summaryStatistics(); - Map criteriaTypeCountMap = new EnumMap<>(QueryRuleCriteriaType.class); - results.stream() - .flatMap(result -> result.criteriaTypeToCountMap().entrySet().stream()) - .forEach(entry -> criteriaTypeCountMap.merge(entry.getKey(), entry.getValue(), Integer::sum)); + Map ruleCriteriaTypeCountMap = new HashMap<>(); + Map ruleTypeCountMap = new HashMap<>(); - Map rulesTypeCountMap = new HashMap<>(); - criteriaTypeCountMap.forEach((criteriaType, count) -> rulesTypeCountMap.put(criteriaType.name().toLowerCase(Locale.ROOT), count)); + results.forEach(result -> { + populateCounts(ruleCriteriaTypeCountMap, result.criteriaTypeToCountMap()); + populateCounts(ruleTypeCountMap, result.ruleTypeToCountMap()); + }); queryRulesUsage.put(TOTAL_COUNT, response.queryPage().count()); queryRulesUsage.put(TOTAL_RULE_COUNT, ruleStats.getSum()); queryRulesUsage.put(MIN_RULE_COUNT, results.isEmpty() ? 0 : ruleStats.getMin()); queryRulesUsage.put(MAX_RULE_COUNT, results.isEmpty() ? 
0 : ruleStats.getMax()); - if (rulesTypeCountMap.isEmpty() == false) { - queryRulesUsage.put(RULE_CRITERIA_TOTAL_COUNTS, rulesTypeCountMap); + if (ruleCriteriaTypeCountMap.isEmpty() == false) { + queryRulesUsage.put(RULE_CRITERIA_TOTAL_COUNTS, ruleCriteriaTypeCountMap); + } + if (ruleTypeCountMap.isEmpty() == false) { + queryRulesUsage.put(EnterpriseSearchFeatureSetUsage.RULE_TYPE_TOTAL_COUNTS, ruleTypeCountMap); } } + + private void populateCounts(Map targetMap, Map, Integer> sourceMap) { + sourceMap.forEach( + (key, value) -> targetMap.merge(key.name().toLowerCase(Locale.ROOT), value, (v1, v2) -> (Integer) v1 + (Integer) v2) + ); + } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java index 3ce51ae5d832d..9b264a2cc41cf 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java @@ -445,6 +445,7 @@ private static QueryRulesetListItem hitToQueryRulesetListItem(SearchHit searchHi final List> rules = ((List>) sourceMap.get(QueryRuleset.RULES_FIELD.getPreferredName())); final int numRules = rules.size(); final Map queryRuleCriteriaTypeToCountMap = new EnumMap<>(QueryRuleCriteriaType.class); + final Map ruleTypeToCountMap = new EnumMap<>(QueryRule.QueryRuleType.class); for (LinkedHashMap rule : rules) { @SuppressWarnings("unchecked") List> criteriaList = ((List>) rule.get(QueryRule.CRITERIA_FIELD.getPreferredName())); @@ -453,9 +454,12 @@ private static QueryRulesetListItem hitToQueryRulesetListItem(SearchHit searchHi final QueryRuleCriteriaType queryRuleCriteriaType = QueryRuleCriteriaType.type(criteriaType); queryRuleCriteriaTypeToCountMap.compute(queryRuleCriteriaType, (k, v) -> v == null ? 1 : v + 1); } + final String ruleType = ((String) rule.get(QueryRule.TYPE_FIELD.getPreferredName())); + final QueryRule.QueryRuleType queryRuleType = QueryRule.QueryRuleType.queryRuleType(ruleType); + ruleTypeToCountMap.compute(queryRuleType, (k, v) -> v == null ? 
1 : v + 1); } - return new QueryRulesetListItem(rulesetId, numRules, queryRuleCriteriaTypeToCountMap); + return new QueryRulesetListItem(rulesetId, numRules, queryRuleCriteriaTypeToCountMap, ruleTypeToCountMap); } public record QueryRulesetResult(List rulesets, long totalResults) {} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java index f3bc07387512f..3a61c848d3813 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java @@ -32,10 +32,12 @@ public class QueryRulesetListItem implements Writeable, ToXContentObject { public static final ParseField RULESET_ID_FIELD = new ParseField("ruleset_id"); public static final ParseField RULE_TOTAL_COUNT_FIELD = new ParseField("rule_total_count"); public static final ParseField RULE_CRITERIA_TYPE_COUNTS_FIELD = new ParseField("rule_criteria_types_counts"); + public static final ParseField RULE_TYPE_COUNTS_FIELD = new ParseField("rule_type_counts"); private final String rulesetId; private final int ruleTotalCount; private final Map criteriaTypeToCountMap; + private final Map ruleTypeToCountMap; /** * Constructs a QueryRulesetListItem. @@ -44,11 +46,17 @@ public class QueryRulesetListItem implements Writeable, ToXContentObject { * @param ruleTotalCount The number of rules contained within the ruleset. * @param criteriaTypeToCountMap A map of criteria type to the number of rules of that type. */ - public QueryRulesetListItem(String rulesetId, int ruleTotalCount, Map criteriaTypeToCountMap) { + public QueryRulesetListItem( + String rulesetId, + int ruleTotalCount, + Map criteriaTypeToCountMap, + Map ruleTypeToCountMap + ) { Objects.requireNonNull(rulesetId, "rulesetId cannot be null on a QueryRuleListItem"); this.rulesetId = rulesetId; this.ruleTotalCount = ruleTotalCount; this.criteriaTypeToCountMap = criteriaTypeToCountMap; + this.ruleTypeToCountMap = ruleTypeToCountMap; } public QueryRulesetListItem(StreamInput in) throws IOException { @@ -59,6 +67,14 @@ public QueryRulesetListItem(StreamInput in) throws IOException { } else { this.criteriaTypeToCountMap = Map.of(); } + TransportVersion streamTransportVersion = in.getTransportVersion(); + if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15) + || streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16) + || streamTransportVersion.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { + this.ruleTypeToCountMap = in.readMap(m -> in.readEnum(QueryRule.QueryRuleType.class), StreamInput::readInt); + } else { + this.ruleTypeToCountMap = Map.of(); + } } @Override @@ -71,6 +87,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(criteriaType.name().toLowerCase(Locale.ROOT), criteriaTypeToCountMap.get(criteriaType)); } builder.endObject(); + builder.startObject(RULE_TYPE_COUNTS_FIELD.getPreferredName()); + for (QueryRule.QueryRuleType ruleType : ruleTypeToCountMap.keySet()) { + builder.field(ruleType.name().toLowerCase(Locale.ROOT), ruleTypeToCountMap.get(ruleType)); + } + builder.endObject(); builder.endObject(); return builder; } @@ -82,6 +103,12 @@ public void writeTo(StreamOutput out) throws IOException { if 
(out.getTransportVersion().onOrAfter(EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) { out.writeMap(criteriaTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt); } + TransportVersion streamTransportVersion = out.getTransportVersion(); + if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15) + || streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16) + || streamTransportVersion.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { + out.writeMap(ruleTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt); + } } /** @@ -106,6 +133,10 @@ public Map criteriaTypeToCountMap() { return criteriaTypeToCountMap; } + public Map ruleTypeToCountMap() { + return ruleTypeToCountMap; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -113,11 +144,12 @@ public boolean equals(Object o) { QueryRulesetListItem that = (QueryRulesetListItem) o; return ruleTotalCount == that.ruleTotalCount && Objects.equals(rulesetId, that.rulesetId) - && Objects.equals(criteriaTypeToCountMap, that.criteriaTypeToCountMap); + && Objects.equals(criteriaTypeToCountMap, that.criteriaTypeToCountMap) + && Objects.equals(ruleTypeToCountMap, that.ruleTypeToCountMap); } @Override public int hashCode() { - return Objects.hash(rulesetId, ruleTotalCount, criteriaTypeToCountMap); + return Objects.hash(rulesetId, ruleTotalCount, criteriaTypeToCountMap, ruleTypeToCountMap); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java index 11397583ce5b9..62f9f3fd46cc4 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -33,6 +34,8 @@ public class ListQueryRulesetsAction { public static final String NAME = "cluster:admin/xpack/query_rules/list"; public static final ActionType INSTANCE = new ActionType<>(NAME); + public static final NodeFeature QUERY_RULE_LIST_TYPES = new NodeFeature("query_rule_list_types"); + private ListQueryRulesetsAction() {/* no instances */} public static class Request extends ActionRequest implements ToXContentObject { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java index 9ef2f630b50bd..54a89d061de35 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java @@ -11,15 +11,14 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; import 
org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilderWrapper; import org.elasticsearch.search.retriever.RetrieverParserContext; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -129,11 +128,10 @@ public int rankWindowSize() { } @Override - protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { - var ret = super.createSearchSourceBuilder(pit, retrieverBuilder); - checkValidSort(ret.sorts()); - ret.query(new RuleQueryBuilder(ret.query(), matchCriteria, rulesetIds)); - return ret; + protected SearchSourceBuilder finalizeSourceBuilder(SearchSourceBuilder source) { + checkValidSort(source.sorts()); + source.query(new RuleQueryBuilder(source.query(), matchCriteria, rulesetIds)); + return source; } private static void checkValidSort(List> sortBuilders) { diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java index 1099603e9be07..681b14e8be61c 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java @@ -61,7 +61,7 @@ public List routes() { }; FakeRestRequest fakeRestRequest = new FakeRestRequest(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), isLicensed ? 0 : 1); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, isLicensed ? 
0 : 1); try (var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java index 5ae0f51cb6112..27d5e240534b2 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java @@ -8,8 +8,10 @@ package org.elasticsearch.xpack.application.rules.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.application.EnterpriseSearchModuleTestUtils; +import org.elasticsearch.xpack.application.rules.QueryRule; import org.elasticsearch.xpack.application.rules.QueryRuleCriteriaType; import org.elasticsearch.xpack.application.rules.QueryRuleset; import org.elasticsearch.xpack.application.rules.QueryRulesetListItem; @@ -32,9 +34,13 @@ private static ListQueryRulesetsAction.Response randomQueryRulesetListItem() { QueryRuleset queryRuleset = EnterpriseSearchModuleTestUtils.randomQueryRuleset(); Map criteriaTypeToCountMap = Map.of( randomFrom(QueryRuleCriteriaType.values()), - randomIntBetween(0, 10) + randomIntBetween(1, 10) ); - return new QueryRulesetListItem(queryRuleset.id(), queryRuleset.rules().size(), criteriaTypeToCountMap); + Map ruleTypeToCountMap = Map.of( + randomFrom(QueryRule.QueryRuleType.values()), + randomIntBetween(1, 10) + ); + return new QueryRulesetListItem(queryRuleset.id(), queryRuleset.rules().size(), criteriaTypeToCountMap, ruleTypeToCountMap); }), randomLongBetween(0, 1000)); } @@ -53,12 +59,22 @@ protected ListQueryRulesetsAction.Response mutateInstanceForVersion( ListQueryRulesetsAction.Response instance, TransportVersion version ) { - if (version.onOrAfter(QueryRulesetListItem.EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) { + if (version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15) + || version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16) + || version.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { return instance; + } else if (version.onOrAfter(QueryRulesetListItem.EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) { + List updatedResults = new ArrayList<>(); + for (QueryRulesetListItem listItem : instance.queryPage.results()) { + updatedResults.add( + new QueryRulesetListItem(listItem.rulesetId(), listItem.ruleTotalCount(), listItem.criteriaTypeToCountMap(), Map.of()) + ); + } + return new ListQueryRulesetsAction.Response(updatedResults, instance.queryPage.count()); } else { List updatedResults = new ArrayList<>(); for (QueryRulesetListItem listItem : instance.queryPage.results()) { - updatedResults.add(new QueryRulesetListItem(listItem.rulesetId(), listItem.ruleTotalCount(), Map.of())); + updatedResults.add(new QueryRulesetListItem(listItem.rulesetId(), listItem.ruleTotalCount(), Map.of(), Map.of())); } return new ListQueryRulesetsAction.Response(updatedResults, instance.queryPage.count()); } diff --git a/x-pack/plugin/eql/build.gradle b/x-pack/plugin/eql/build.gradle index cda236c3d02ae..b0b5fefa37fcd 100644 --- 
a/x-pack/plugin/eql/build.gradle
+++ b/x-pack/plugin/eql/build.gradle
@@ -32,7 +32,7 @@ dependencies {
  * Enable QA/rest integration tests for snapshot builds only *
  * TODO: Enable for all builds upon this feature release     *
  ****************************************************************/
-if (BuildParams.isSnapshotBuild()) {
+if (buildParams.isSnapshotBuild()) {
   addQaCheckDependencies(project)
 }
diff --git a/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle b/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle
index a16c24c852377..cbea0896264d5 100644
--- a/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle
+++ b/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle
@@ -15,7 +15,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.bwc-test'
 apply plugin: 'elasticsearch.rest-resources'

-BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
+buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->

   /**
    * We execute tests 3 times.
diff --git a/x-pack/plugin/eql/qa/correctness/build.gradle b/x-pack/plugin/eql/qa/correctness/build.gradle
index d245dc444f0b8..a791356499f5c 100644
--- a/x-pack/plugin/eql/qa/correctness/build.gradle
+++ b/x-pack/plugin/eql/qa/correctness/build.gradle
@@ -41,7 +41,7 @@ def runTaskCluster = testClusters.register('runTask') {

 tasks.named('javaRestTest').configure {
   onlyIf("FIPS mode disabled and service account file available") {
-    serviceAccountFile && BuildParams.inFipsJvm == false
+    serviceAccountFile && buildParams.inFipsJvm == false
   }

   testLogging {
diff --git a/x-pack/plugin/eql/qa/mixed-node/build.gradle b/x-pack/plugin/eql/qa/mixed-node/build.gradle
index 8b9e082215fc4..d3aa227c7ef88 100644
--- a/x-pack/plugin/eql/qa/mixed-node/build.gradle
+++ b/x-pack/plugin/eql/qa/mixed-node/build.gradle
@@ -13,7 +13,7 @@ dependencies {
 tasks.named("javaRestTest").configure { enabled = false }

-BuildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.0") &&
+buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.0") &&
   v != VersionProperties.getElasticsearchVersion()) { bwcVersion, baseName ->
   def cluster = testClusters.register(baseName) {
     versions = [bwcVersion.toString(), project.version]
diff --git a/x-pack/plugin/eql/qa/rest/build.gradle b/x-pack/plugin/eql/qa/rest/build.gradle
index d5b0cc42091f3..00f196d863f2e 100644
--- a/x-pack/plugin/eql/qa/rest/build.gradle
+++ b/x-pack/plugin/eql/qa/rest/build.gradle
@@ -30,7 +30,7 @@ tasks.named('yamlRestCompatTest') {
   usesDefaultDistribution()
 }

-if (BuildParams.inFipsJvm){
+if (buildParams.inFipsJvm){
   // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC
   tasks.named("javaRestTest").configure{enabled = false }
   tasks.named("yamlRestTest").configure{enabled = false }
diff --git a/x-pack/plugin/eql/qa/security/build.gradle b/x-pack/plugin/eql/qa/security/build.gradle
index 0641c47273f0e..1f0f949cab706 100644
--- a/x-pack/plugin/eql/qa/security/build.gradle
+++ b/x-pack/plugin/eql/qa/security/build.gradle
@@ -10,7 +10,7 @@ tasks.named('javaRestTest') {
   usesDefaultDistribution()
 }

-if (BuildParams.inFipsJvm){
+if (buildParams.inFipsJvm){
   // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC
   tasks.named("javaRestTest").configure{enabled = false }
 }
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java
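The Predicates.java change that follows introduces extractCommon, which pulls the conjuncts shared by every expression in a list out into one common predicate. A minimal usage sketch, assuming common, extra1 and extra2 are already-built predicate expressions and src is their Source (And and Literal.TRUE are the esql-core types the new method itself uses):

    // inputs: [common AND extra1, common AND extra2, common]
    Tuple<Expression, List<Expression>> split = Predicates.extractCommon(
        List.of(new And(src, common, extra1), new And(src, common, extra2), common)
    );
    // split.v1() is `common`; split.v2() is [extra1, extra2, Literal.TRUE],
    // Literal.TRUE marking an input that the common part consumed entirely.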
index 28bbf956fd71e..e63cc1fcf25fe 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java @@ -6,7 +6,9 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate; +import org.elasticsearch.core.Tuple; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; @@ -113,4 +115,43 @@ public static List subtract(List from, List } return diff.isEmpty() ? emptyList() : diff; } + + /** + * Given a list of expressions of predicates, extract a new expression of + * all the common ones and return it, along the original list with the + * common ones removed. + *
+ * <p>
+ * Example: for ['field1 > 0 AND field2 > 0', 'field1 > 0 AND field3 > 0', + * 'field1 > 0'], the function will return 'field1 > 0' as the common + * predicate expression and ['field2 > 0', 'field3 > 0', Literal.TRUE] as + * the left predicates list. + * + * @param expressions list of expressions to extract common predicates from. + * @return a tuple having as the first element an expression of the common + * predicates and as the second element the list of expressions with the + * common predicates removed. If there are no common predicates, `null` will + * be returned as the first element and the original list as the second. If + * for one of the expressions in the input list, nothing is left after + * trimming the common predicates, it will be replaced with Literal.TRUE. + */ + public static Tuple> extractCommon(List expressions) { + List common = null; + List> splitAnds = new ArrayList<>(expressions.size()); + for (var expression : expressions) { + var split = splitAnd(expression); + common = common == null ? split : inCommon(split, common); + if (common.isEmpty()) { + return Tuple.tuple(null, expressions); + } + splitAnds.add(split); + } + + List trimmed = new ArrayList<>(expressions.size()); + final List finalCommon = common; + splitAnds.forEach(split -> { + var subtracted = subtract(split, finalCommon); + trimmed.add(subtracted.isEmpty() ? Literal.TRUE : combineAnd(subtracted)); + }); + return Tuple.tuple(combineAnd(common), trimmed); + } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 9708a3ea0db85..347e6b43099fc 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -29,7 +29,6 @@ import java.util.function.Function; import static java.util.stream.Collectors.toMap; -import static java.util.stream.Collectors.toUnmodifiableMap; import static org.elasticsearch.xpack.esql.core.util.PlanStreamInput.readCachedStringWithVersionCheck; import static org.elasticsearch.xpack.esql.core.util.PlanStreamOutput.writeCachedStringWithVersionCheck; @@ -276,7 +275,7 @@ public enum DataType { private static final Collection STRING_TYPES = DataType.types().stream().filter(DataType::isString).toList(); - private static final Map NAME_TO_TYPE = TYPES.stream().collect(toUnmodifiableMap(DataType::typeName, t -> t)); + private static final Map NAME_TO_TYPE; private static final Map ES_TO_TYPE; @@ -287,6 +286,10 @@ public enum DataType { map.put("point", DataType.CARTESIAN_POINT); map.put("shape", DataType.CARTESIAN_SHAPE); ES_TO_TYPE = Collections.unmodifiableMap(map); + // DATETIME has different esType and typeName, add an entry in NAME_TO_TYPE with date as key + map = TYPES.stream().collect(toMap(DataType::typeName, t -> t)); + map.put("date", DataType.DATETIME); + NAME_TO_TYPE = Collections.unmodifiableMap(map); } private static final Map NAME_OR_ALIAS_TO_TYPE; diff --git a/x-pack/plugin/esql/build.gradle b/x-pack/plugin/esql/build.gradle index 1cf39f06f77c8..6541fcd84afef 100644 --- a/x-pack/plugin/esql/build.gradle +++ b/x-pack/plugin/esql/build.gradle @@ -74,7 +74,7 @@ interface Injected { } tasks.named("test").configure { - if (BuildParams.isCi() == false) { + if (buildParams.isCi() == false) { systemProperty 'generateDocs', true def injected = project.objects.newInstance(Injected) doFirst { @@ 
-145,7 +145,7 @@ tasks.named("test").configure { * Enable QA/rest integration tests for snapshot builds only * * TODO: Enable for all builds upon this feature release * ****************************************************************/ -if (BuildParams.isSnapshotBuild()) { +if (buildParams.isSnapshotBuild()) { addQaCheckDependencies(project) } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMaxFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMaxFactory.java new file mode 100644 index 0000000000000..ba7de22b1b821 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMaxFactory.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.search.MultiValueMode; + +import java.io.IOException; +import java.util.List; +import java.util.function.Function; + +/** + * Factory that generates an operator that finds the max value of a field using the {@link LuceneMinMaxOperator}. 
+ */ +public final class LuceneMaxFactory extends LuceneOperator.Factory { + + public enum NumberType implements LuceneMinMaxOperator.NumberType { + INTEGER { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantIntBlockWith(Math.toIntExact(result), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantIntBlockWith(Integer.MIN_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToInt(bytes, 0); + } + }, + FLOAT { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantFloatBlockWith(NumericUtils.sortableIntToFloat(Math.toIntExact(result)), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantFloatBlockWith(-Float.MAX_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToInt(bytes, 0); + } + }, + LONG { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantLongBlockWith(result, pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantLongBlockWith(Long.MIN_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToLong(bytes, 0); + } + }, + DOUBLE { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantDoubleBlockWith(NumericUtils.sortableLongToDouble(result), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantDoubleBlockWith(-Double.MAX_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToLong(bytes, 0); + } + }; + + public final NumericDocValues multiValueMode(SortedNumericDocValues sortedNumericDocValues) { + return MultiValueMode.MAX.select(sortedNumericDocValues); + } + + public final long fromPointValues(PointValues pointValues) throws IOException { + return bytesToLong(pointValues.getMaxPackedValue()); + } + + public final long evaluate(long value1, long value2) { + return Math.max(value1, value2); + } + + abstract long bytesToLong(byte[] bytes); + } + + private final String fieldName; + private final NumberType numberType; + + public LuceneMaxFactory( + List contexts, + Function queryFunction, + DataPartitioning dataPartitioning, + int taskConcurrency, + String fieldName, + NumberType numberType, + int limit + ) { + super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, ScoreMode.COMPLETE_NO_SCORES); + this.fieldName = fieldName; + this.numberType = numberType; + } + + @Override + public SourceOperator get(DriverContext driverContext) { + return new LuceneMinMaxOperator(driverContext.blockFactory(), sliceQueue, fieldName, numberType, limit, Long.MIN_VALUE); + } + + @Override + public String describe() { + return "LuceneMaxOperator[type = " + + numberType.name() + + ", dataPartitioning = " + + dataPartitioning + + ", fieldName = " + + fieldName + + ", limit = " + + limit + + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinFactory.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinFactory.java new file mode 100644 index 0000000000000..e3c6c8310373d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinFactory.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.search.MultiValueMode; + +import java.io.IOException; +import java.util.List; +import java.util.function.Function; + +/** + * Factory that generates an operator that finds the min value of a field using the {@link LuceneMinMaxOperator}. + */ +public final class LuceneMinFactory extends LuceneOperator.Factory { + + public enum NumberType implements LuceneMinMaxOperator.NumberType { + INTEGER { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantIntBlockWith(Math.toIntExact(result), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantIntBlockWith(Integer.MAX_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToInt(bytes, 0); + } + }, + FLOAT { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantFloatBlockWith(NumericUtils.sortableIntToFloat(Math.toIntExact(result)), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantFloatBlockWith(Float.POSITIVE_INFINITY, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToInt(bytes, 0); + } + }, + LONG { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantLongBlockWith(result, pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantLongBlockWith(Long.MAX_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToLong(bytes, 0); + } + }, + DOUBLE { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantDoubleBlockWith(NumericUtils.sortableLongToDouble(result), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantDoubleBlockWith(Double.POSITIVE_INFINITY, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToLong(bytes, 0); + } + }; + + public final NumericDocValues multiValueMode(SortedNumericDocValues sortedNumericDocValues) { + return 
+            return MultiValueMode.MIN.select(sortedNumericDocValues);
+        }
+
+        public final long fromPointValues(PointValues pointValues) throws IOException {
+            return bytesToLong(pointValues.getMinPackedValue());
+        }
+
+        public final long evaluate(long value1, long value2) {
+            return Math.min(value1, value2);
+        }
+
+        abstract long bytesToLong(byte[] bytes);
+    }
+
+    private final String fieldName;
+    private final NumberType numberType;
+
+    public LuceneMinFactory(
+        List<? extends ShardContext> contexts,
+        Function<ShardContext, Query> queryFunction,
+        DataPartitioning dataPartitioning,
+        int taskConcurrency,
+        String fieldName,
+        NumberType numberType,
+        int limit
+    ) {
+        super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, ScoreMode.COMPLETE_NO_SCORES);
+        this.fieldName = fieldName;
+        this.numberType = numberType;
+    }
+
+    @Override
+    public SourceOperator get(DriverContext driverContext) {
+        return new LuceneMinMaxOperator(driverContext.blockFactory(), sliceQueue, fieldName, numberType, limit, Long.MAX_VALUE);
+    }
+
+    @Override
+    public String describe() {
+        return "LuceneMinOperator[type = "
+            + numberType.name()
+            + ", dataPartitioning = "
+            + dataPartitioning
+            + ", fieldName = "
+            + fieldName
+            + ", limit = "
+            + limit
+            + "]";
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java
new file mode 100644
index 0000000000000..c41c31345df4e
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java
@@ -0,0 +1,179 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.lucene;
+
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.PointValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorable;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.BooleanBlock;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.core.Releasables;
+import org.elasticsearch.search.MultiValueMode;
+
+import java.io.IOException;
+
+/**
+ * Operator that finds the min or max value of a field using Lucene searches
+ * and always returns one entry that mimics the min/max aggregation internal state:
+ * 1. the min/max with a type depending on the {@link NumberType} (the initial value if no doc is seen)
+ * 2. a boolean flag (seen) that is true if at least one document has been matched, otherwise false
+ *
+ * It works for fields that index data using Lucene {@link PointValues} and/or {@link SortedNumericDocValues}.
+ * It assumes that {@link SortedNumericDocValues} are always present.
+ */
+final class LuceneMinMaxOperator extends LuceneOperator {
+
+    sealed interface NumberType permits LuceneMinFactory.NumberType, LuceneMaxFactory.NumberType {
+
+        /** Extract the competitive value from the {@link PointValues} */
+        long fromPointValues(PointValues pointValues) throws IOException;
+
+        /** Wraps the provided {@link SortedNumericDocValues} with a {@link MultiValueMode} */
+        NumericDocValues multiValueMode(SortedNumericDocValues sortedNumericDocValues);
+
+        /** Return the competitive value between {@code value1} and {@code value2} */
+        long evaluate(long value1, long value2);
+
+        /** Build the corresponding block */
+        Block buildResult(BlockFactory blockFactory, long result, int pageSize);
+
+        /** Build the corresponding block */
+        Block buildEmptyResult(BlockFactory blockFactory, int pageSize);
+    }
+
+    private static final int PAGE_SIZE = 1;
+
+    private boolean seen = false;
+    private int remainingDocs;
+    private long result;
+
+    private final NumberType numberType;
+
+    private final String fieldName;
+
+    LuceneMinMaxOperator(
+        BlockFactory blockFactory,
+        LuceneSliceQueue sliceQueue,
+        String fieldName,
+        NumberType numberType,
+        int limit,
+        long initialResult
+    ) {
+        super(blockFactory, PAGE_SIZE, sliceQueue);
+        this.remainingDocs = limit;
+        this.numberType = numberType;
+        this.fieldName = fieldName;
+        this.result = initialResult;
+    }
+
+    @Override
+    public boolean isFinished() {
+        return doneCollecting || remainingDocs == 0;
+    }
+
+    @Override
+    public void finish() {
+        doneCollecting = true;
+    }
+
+    @Override
+    public Page getCheckedOutput() throws IOException {
+        if (isFinished()) {
+            assert remainingDocs <= 0 : remainingDocs;
+            return null;
+        }
+        final long start = System.nanoTime();
+        try {
+            final LuceneScorer scorer = getCurrentOrLoadNextScorer();
+            // no scorer means no more docs
+            if (scorer == null) {
+                remainingDocs = 0;
+            } else {
+                final LeafReader reader = scorer.leafReaderContext().reader();
+                final Query query = scorer.weight().getQuery();
+                if (query == null || query instanceof MatchAllDocsQuery) {
+                    final PointValues pointValues = reader.getPointValues(fieldName);
+                    // only apply shortcut if we are visiting all documents, otherwise we need to trigger the search
+                    // on doc values as that's the order they are visited without push down.
+                    if (pointValues != null && pointValues.getDocCount() >= remainingDocs) {
+                        final Bits liveDocs = reader.getLiveDocs();
+                        if (liveDocs == null) {
+                            // In data partitioning, we might have got the same segment previous
+                            // to this but with a different document range. And we're totally ignoring that range.
+                            // We're just reading the min/max from the segment. That's sneaky, but it makes sense.
+                            // And if we get another slice in the same segment we may as well skip it -
+                            // we've already looked.
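+                            // Editor's note (added commentary, not part of the original patch):
+                            // the shortcut is also gated on liveDocs == null above, because a
+                            // segment with deletions could have its packed min/max value come
+                            // from a deleted document, which would make the result incorrect.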
+ if (scorer.position() == 0) { + seen = true; + result = numberType.evaluate(result, numberType.fromPointValues(pointValues)); + if (remainingDocs != NO_LIMIT) { + remainingDocs -= pointValues.getDocCount(); + } + } + scorer.markAsDone(); + } + } + } + if (scorer.isDone() == false) { + // could not apply shortcut, trigger the search + final NumericDocValues values = numberType.multiValueMode(reader.getSortedNumericDocValues(fieldName)); + final LeafCollector leafCollector = new LeafCollector() { + @Override + public void setScorer(Scorable scorer) {} + + @Override + public void collect(int doc) throws IOException { + assert remainingDocs > 0; + remainingDocs--; + if (values.advanceExact(doc)) { + seen = true; + result = numberType.evaluate(result, values.longValue()); + } + } + }; + scorer.scoreNextRange(leafCollector, reader.getLiveDocs(), remainingDocs); + } + } + + Page page = null; + // emit only one page + if (remainingDocs <= 0 && pagesEmitted == 0) { + pagesEmitted++; + Block result = null; + BooleanBlock seen = null; + try { + result = this.seen + ? numberType.buildResult(blockFactory, this.result, PAGE_SIZE) + : numberType.buildEmptyResult(blockFactory, PAGE_SIZE); + seen = blockFactory.newConstantBooleanBlockWith(this.seen, PAGE_SIZE); + page = new Page(PAGE_SIZE, result, seen); + } finally { + if (page == null) { + Releasables.closeExpectNoException(result, seen); + } + } + } + return page; + } finally { + processingNanos += System.nanoTime() - start; + } + } + + @Override + protected void describe(StringBuilder sb) { + sb.append(", remainingDocs=").append(remainingDocs); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxDoubleOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxDoubleOperatorTests.java new file mode 100644 index 0000000000000..4cb113457b23f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxDoubleOperatorTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MaxDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class LuceneMaxDoubleOperatorTests extends LuceneMaxOperatorTestCase { + + @Override + public LuceneMaxFactory.NumberType getNumberType() { + return LuceneMaxFactory.NumberType.DOUBLE; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + double max = -Double.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new DoubleField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, NumericUtils.doubleToSortableLong(newValue())); + } + + private double newValue() { + final double value = randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true); + max = Math.max(max, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(DoubleBlock.class)); + final DoubleBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getDouble(0), equalTo(-Double.MAX_VALUE)); + } else { + assertThat(db.getDouble(0), lessThanOrEqualTo(max)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MaxDoubleAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMaxValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(DoubleBlock.class)); + final DoubleBlock db = (DoubleBlock) block; + if (exactResult) { + assertThat(db.getDouble(0), equalTo(max)); + } else { + assertThat(db.getDouble(0), lessThanOrEqualTo(max)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxFloatOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxFloatOperatorTests.java new file mode 100644 index 0000000000000..4a009a2d84c66 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxFloatOperatorTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FloatField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MaxFloatAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class LuceneMaxFloatOperatorTests extends LuceneMaxOperatorTestCase { + + @Override + public LuceneMaxFactory.NumberType getNumberType() { + return LuceneMaxFactory.NumberType.FLOAT; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + float max = -Float.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new FloatField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + private float newValue() { + final float value = randomFloatBetween(-Float.MAX_VALUE, Float.MAX_VALUE, true); + max = Math.max(max, value); + return value; + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, NumericUtils.floatToSortableInt(newValue())); + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(FloatBlock.class)); + final FloatBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getFloat(0), equalTo(-Float.MAX_VALUE)); + } else { + assertThat(db.getFloat(0), lessThanOrEqualTo(max)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MaxFloatAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMaxValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(FloatBlock.class)); + final FloatBlock fb = (FloatBlock) block; + if (exactResult) { + assertThat(fb.getFloat(0), equalTo(max)); + } else { + assertThat(fb.getFloat(0), lessThanOrEqualTo(max)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxIntOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxIntOperatorTests.java new file mode 100644 index 0000000000000..a6118481ca43d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxIntOperatorTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.IntField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MaxIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class LuceneMaxIntOperatorTests extends LuceneMaxOperatorTestCase { + + @Override + public LuceneMaxFactory.NumberType getNumberType() { + return LuceneMaxFactory.NumberType.INTEGER; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + int max = Integer.MIN_VALUE; + + @Override + public IndexableField newPointField() { + return new IntField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + private int newValue() { + final int value = randomInt(); + max = Math.max(max, value); + return value; + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, newValue()); + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(IntBlock.class)); + final IntBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getInt(0), equalTo(Integer.MIN_VALUE)); + } else { + assertThat(db.getInt(0), lessThanOrEqualTo(max)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MaxIntAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMaxValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(IntBlock.class)); + final IntBlock ib = (IntBlock) block; + if (exactResult) { + assertThat(ib.getInt(0), equalTo(max)); + } else { + assertThat(ib.getInt(0), lessThanOrEqualTo(max)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxLongOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxLongOperatorTests.java new file mode 100644 index 0000000000000..894c8e862123e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxLongOperatorTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MaxLongAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class LuceneMaxLongOperatorTests extends LuceneMaxOperatorTestCase { + + @Override + public LuceneMaxFactory.NumberType getNumberType() { + return LuceneMaxFactory.NumberType.LONG; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + long max = Long.MIN_VALUE; + + @Override + public IndexableField newPointField() { + return new LongField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, newValue()); + } + + private long newValue() { + final long value = randomLong(); + max = Math.max(max, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(LongBlock.class)); + final LongBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getLong(0), equalTo(Long.MIN_VALUE)); + } else { + assertThat(db.getLong(0), lessThanOrEqualTo(max)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MaxLongAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMaxValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(LongBlock.class)); + final LongBlock lb = (LongBlock) block; + if (exactResult) { + assertThat(lb.getLong(0), equalTo(max)); + } else { + assertThat(lb.getLong(0), lessThanOrEqualTo(max)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxOperatorTestCase.java new file mode 100644 index 0000000000000..f5214dccbd00c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxOperatorTestCase.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.AnyOperatorTestCase; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.OperatorTestCase; +import org.elasticsearch.compute.operator.TestResultPageSinkOperator; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.hamcrest.Matcher; +import org.junit.After; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.matchesRegex; + +public abstract class LuceneMaxOperatorTestCase extends AnyOperatorTestCase { + + protected interface NumberTypeTest { + + IndexableField newPointField(); + + IndexableField newDocValuesField(); + + void assertPage(Page page); + + AggregatorFunction newAggregatorFunction(DriverContext context); + + void assertMaxValue(Block block, boolean exactResult); + + } + + protected abstract NumberTypeTest getNumberTypeTest(); + + protected abstract LuceneMaxFactory.NumberType getNumberType(); + + protected static final String FIELD_NAME = "field"; + private final Directory directory = newDirectory(); + private IndexReader reader; + + @After + public void closeIndex() throws IOException { + IOUtils.close(reader, directory); + } + + @Override + protected LuceneMaxFactory simple() { + return simple(getNumberTypeTest(), randomFrom(DataPartitioning.values()), between(1, 10_000), 100); + } + + private LuceneMaxFactory simple(NumberTypeTest numberTypeTest, DataPartitioning dataPartitioning, int numDocs, int limit) { + final boolean enableShortcut = randomBoolean(); + final boolean enableMultiValue = randomBoolean(); + final int commitEvery = Math.max(1, numDocs / 10); + try ( + RandomIndexWriter writer = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + + for (int d = 0; d < numDocs; d++) { + final var numValues = enableMultiValue ? 
randomIntBetween(1, 5) : 1;
+                final var doc = new Document();
+                for (int i = 0; i < numValues; i++) {
+                    if (enableShortcut) {
+                        doc.add(numberTypeTest.newPointField());
+                    } else {
+                        doc.add(numberTypeTest.newDocValuesField());
+                    }
+                }
+                writer.addDocument(doc);
+                if (d % commitEvery == 0) {
+                    writer.commit();
+                }
+            }
+            reader = writer.getReader();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+
+        final ShardContext ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0);
+        final Query query;
+        if (enableShortcut && randomBoolean()) {
+            query = new MatchAllDocsQuery();
+        } else {
+            query = SortedNumericDocValuesField.newSlowRangeQuery(FIELD_NAME, Long.MIN_VALUE, Long.MAX_VALUE);
+        }
+        return new LuceneMaxFactory(List.of(ctx), c -> query, dataPartitioning, between(1, 8), FIELD_NAME, getNumberType(), limit);
+    }
+
+    public void testSimple() {
+        testSimple(this::driverContext);
+    }
+
+    public void testSimpleWithCranky() {
+        try {
+            testSimple(this::crankyDriverContext);
+            logger.info("cranky didn't break");
+        } catch (CircuitBreakingException e) {
+            logger.info("broken", e);
+            assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE));
+        }
+    }
+
+    private void testSimple(Supplier<DriverContext> contexts) {
+        int size = between(1_000, 20_000);
+        int limit = randomBoolean() ? between(10, size) : Integer.MAX_VALUE;
+        testMax(contexts, size, limit);
+    }
+
+    public void testEmpty() {
+        testEmpty(this::driverContext);
+    }
+
+    public void testEmptyWithCranky() {
+        try {
+            testEmpty(this::crankyDriverContext);
+            logger.info("cranky didn't break");
+        } catch (CircuitBreakingException e) {
+            logger.info("broken", e);
+            assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE));
+        }
+    }
+
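+    // Editor's note (added commentary, not part of the original patch): each driver in testMax
+    // below emits at most one page holding the operator's intermediate state: block 0 carries
+    // the running max (or the type's empty value) and block 1 carries the "seen" flag. testMax
+    // replays those pages through the matching MAX aggregator to check the state round-trips.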
+    private void testEmpty(Supplier<DriverContext> contexts) {
+        int limit = randomBoolean() ? between(10, 10000) : Integer.MAX_VALUE;
+        testMax(contexts, 0, limit);
+    }
+
+    private void testMax(Supplier<DriverContext> contexts, int size, int limit) {
+        DataPartitioning dataPartitioning = randomFrom(DataPartitioning.values());
+        NumberTypeTest numberTypeTest = getNumberTypeTest();
+        LuceneMaxFactory factory = simple(numberTypeTest, dataPartitioning, size, limit);
+        List<Page> results = new CopyOnWriteArrayList<>();
+        List<Driver> drivers = new ArrayList<>();
+        int taskConcurrency = between(1, 8);
+        for (int i = 0; i < taskConcurrency; i++) {
+            DriverContext ctx = contexts.get();
+            drivers.add(new Driver(ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add), () -> {}));
+        }
+        OperatorTestCase.runDriver(drivers);
+        assertThat(results.size(), lessThanOrEqualTo(taskConcurrency));
+
+        try (AggregatorFunction aggregatorFunction = numberTypeTest.newAggregatorFunction(contexts.get())) {
+            for (Page page : results) {
+                assertThat(page.getPositionCount(), is(1)); // one row
+                assertThat(page.getBlockCount(), is(2)); // two blocks
+                numberTypeTest.assertPage(page);
+                aggregatorFunction.addIntermediateInput(page);
+            }
+
+            final Block[] result = new Block[1];
+            try {
+                aggregatorFunction.evaluateFinal(result, 0, contexts.get());
+                if (result[0].areAllValuesNull() == false) {
+                    boolean exactResult = size <= limit;
+                    numberTypeTest.assertMaxValue(result[0], exactResult);
+                }
+            } finally {
+                Releasables.close(result);
+            }
+        }
+    }
+
+    @Override
+    protected final Matcher<String> expectedToStringOfSimple() {
+        return matchesRegex("LuceneMinMaxOperator\\[maxPageSize = \\d+, remainingDocs=100]");
+    }
+
+    @Override
+    protected final Matcher<String> expectedDescriptionOfSimple() {
+        return matchesRegex(
+            "LuceneMaxOperator\\[type = "
+                + getNumberType().name()
+                + ", dataPartitioning = (DOC|SHARD|SEGMENT), fieldName = "
+                + FIELD_NAME
+                + ", limit = 100]"
+        );
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinDoubleOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinDoubleOperatorTests.java
new file mode 100644
index 0000000000000..5fef2d4897030
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinDoubleOperatorTests.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MinDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; + +public class LuceneMinDoubleOperatorTests extends LuceneMinOperatorTestCase { + + @Override + public LuceneMinFactory.NumberType getNumberType() { + return LuceneMinFactory.NumberType.DOUBLE; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + double min = Double.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new DoubleField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, NumericUtils.doubleToSortableLong(newValue())); + } + + private double newValue() { + final double value = randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true); + min = Math.min(min, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(DoubleBlock.class)); + final DoubleBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getDouble(0), equalTo(Double.POSITIVE_INFINITY)); + } else { + assertThat(db.getDouble(0), greaterThanOrEqualTo(min)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MinDoubleAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMinValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(DoubleBlock.class)); + final DoubleBlock db = (DoubleBlock) block; + if (exactResult) { + assertThat(db.getDouble(0), equalTo(min)); + } else { + assertThat(db.getDouble(0), greaterThanOrEqualTo(min)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinFloatOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinFloatOperatorTests.java new file mode 100644 index 0000000000000..41c8751c08a96 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinFloatOperatorTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.compute.lucene;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.compute.aggregation.AggregatorFunction;
+import org.elasticsearch.compute.aggregation.MinFloatAggregatorFunctionSupplier;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BooleanBlock;
+import org.elasticsearch.compute.data.FloatBlock;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
+
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class LuceneMinFloatOperatorTests extends LuceneMinOperatorTestCase {
+
+    @Override
+    public LuceneMinFactory.NumberType getNumberType() {
+        return LuceneMinFactory.NumberType.FLOAT;
+    }
+
+    @Override
+    protected NumberTypeTest getNumberTypeTest() {
+        return new NumberTypeTest() {
+
+            float min = Float.MAX_VALUE;
+
+            @Override
+            public IndexableField newPointField() {
+                return new FloatField(FIELD_NAME, newValue(), randomFrom(Field.Store.values()));
+            }
+
+            @Override
+            public IndexableField newDocValuesField() {
+                return new SortedNumericDocValuesField(FIELD_NAME, NumericUtils.floatToSortableInt(newValue()));
+            }
+
+            private float newValue() {
+                final float value = randomFloatBetween(-Float.MAX_VALUE, Float.MAX_VALUE, true);
+                min = Math.min(min, value);
+                return value;
+            }
+
+            @Override
+            public void assertPage(Page page) {
+                assertThat(page.getBlock(0), instanceOf(FloatBlock.class));
+                final FloatBlock db = page.getBlock(0);
+                assertThat(page.getBlock(1), instanceOf(BooleanBlock.class));
+                final BooleanBlock bb = page.getBlock(1);
+                final float v = db.getFloat(0);
+                if (bb.getBoolean(0) == false) {
+                    assertThat(v, equalTo(Float.POSITIVE_INFINITY));
+                } else {
+                    assertThat(v, greaterThanOrEqualTo(min));
+                }
+            }
+
+            @Override
+            public AggregatorFunction newAggregatorFunction(DriverContext context) {
+                return new MinFloatAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context);
+            }
+
+            @Override
+            public void assertMinValue(Block block, boolean exactResult) {
+                assertThat(block, instanceOf(FloatBlock.class));
+                final FloatBlock fb = (FloatBlock) block;
+                if (exactResult) {
+                    assertThat(fb.getFloat(0), equalTo(min));
+                } else {
+                    assertThat(fb.getFloat(0), greaterThanOrEqualTo(min));
+                }
+            }
+        };
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinIntegerOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinIntegerOperatorTests.java
new file mode 100644
index 0000000000000..5d2c867f4f660
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinIntegerOperatorTests.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.IntField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MinIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; + +public class LuceneMinIntegerOperatorTests extends LuceneMinOperatorTestCase { + + @Override + public LuceneMinFactory.NumberType getNumberType() { + return LuceneMinFactory.NumberType.INTEGER; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + int min = Integer.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new IntField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, newValue()); + } + + private int newValue() { + final int value = randomInt(); + min = Math.min(min, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(IntBlock.class)); + IntBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getInt(0), equalTo(Integer.MAX_VALUE)); + } else { + assertThat(db.getInt(0), greaterThanOrEqualTo(min)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MinIntAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMinValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(IntBlock.class)); + final IntBlock ib = (IntBlock) block; + if (exactResult) { + assertThat(ib.getInt(0), equalTo(min)); + } else { + assertThat(ib.getInt(0), greaterThanOrEqualTo(min)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinLongOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinLongOperatorTests.java new file mode 100644 index 0000000000000..15c34f5853ae2 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinLongOperatorTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MinLongAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; + +public class LuceneMinLongOperatorTests extends LuceneMinOperatorTestCase { + + @Override + public LuceneMinFactory.NumberType getNumberType() { + return LuceneMinFactory.NumberType.LONG; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + long min = Long.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new LongField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, newValue()); + } + + private long newValue() { + final long value = randomLong(); + min = Math.min(min, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(LongBlock.class)); + final LongBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getLong(0), equalTo(Long.MAX_VALUE)); + } else { + assertThat(db.getLong(0), greaterThanOrEqualTo(min)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MinLongAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMinValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(LongBlock.class)); + final LongBlock lb = (LongBlock) block; + if (exactResult) { + assertThat(lb.getLong(0), equalTo(min)); + } else { + assertThat(lb.getLong(0), greaterThanOrEqualTo(min)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinOperatorTestCase.java new file mode 100644 index 0000000000000..493512bd83bec --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinOperatorTestCase.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.AnyOperatorTestCase; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.OperatorTestCase; +import org.elasticsearch.compute.operator.TestResultPageSinkOperator; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.hamcrest.Matcher; +import org.junit.After; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.matchesRegex; + +public abstract class LuceneMinOperatorTestCase extends AnyOperatorTestCase { + + protected interface NumberTypeTest { + + IndexableField newPointField(); + + IndexableField newDocValuesField(); + + void assertPage(Page page); + + AggregatorFunction newAggregatorFunction(DriverContext context); + + void assertMinValue(Block block, boolean exactResult); + + } + + protected abstract NumberTypeTest getNumberTypeTest(); + + protected abstract LuceneMinFactory.NumberType getNumberType(); + + protected static final String FIELD_NAME = "field"; + private final Directory directory = newDirectory(); + private IndexReader reader; + + @After + public void closeIndex() throws IOException { + IOUtils.close(reader, directory); + } + + @Override + protected LuceneMinFactory simple() { + return simple(getNumberTypeTest(), randomFrom(DataPartitioning.values()), between(1, 10_000), 100); + } + + private LuceneMinFactory simple(NumberTypeTest numberTypeTest, DataPartitioning dataPartitioning, int numDocs, int limit) { + final boolean enableShortcut = randomBoolean(); + final boolean enableMultiValue = randomBoolean(); + final int commitEvery = Math.max(1, numDocs / 10); + try ( + RandomIndexWriter writer = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + + for (int d = 0; d < numDocs; d++) { + final var numValues = enableMultiValue ? 
randomIntBetween(1, 5) : 1;
+                final var doc = new Document();
+                for (int i = 0; i < numValues; i++) {
+                    if (enableShortcut) {
+                        doc.add(numberTypeTest.newPointField());
+                    } else {
+                        doc.add(numberTypeTest.newDocValuesField());
+                    }
+                }
+                writer.addDocument(doc);
+                if (d % commitEvery == 0) {
+                    writer.commit();
+                }
+            }
+            reader = writer.getReader();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+
+        final ShardContext ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0);
+        final Query query;
+        if (enableShortcut && randomBoolean()) {
+            query = new MatchAllDocsQuery();
+        } else {
+            query = SortedNumericDocValuesField.newSlowRangeQuery(FIELD_NAME, Long.MIN_VALUE, Long.MAX_VALUE);
+        }
+        return new LuceneMinFactory(List.of(ctx), c -> query, dataPartitioning, between(1, 8), FIELD_NAME, getNumberType(), limit);
+    }
+
+    public void testSimple() {
+        testSimple(this::driverContext);
+    }
+
+    public void testSimpleWithCranky() {
+        try {
+            testSimple(this::crankyDriverContext);
+            logger.info("cranky didn't break");
+        } catch (CircuitBreakingException e) {
+            logger.info("broken", e);
+            assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE));
+        }
+    }
+
+    private void testSimple(Supplier<DriverContext> contexts) {
+        int size = between(1_000, 20_000);
+        int limit = randomBoolean() ? between(10, size) : Integer.MAX_VALUE;
+        testMin(contexts, size, limit);
+    }
+
+    public void testEmpty() {
+        testEmpty(this::driverContext);
+    }
+
+    public void testEmptyWithCranky() {
+        try {
+            testEmpty(this::crankyDriverContext);
+            logger.info("cranky didn't break");
+        } catch (CircuitBreakingException e) {
+            logger.info("broken", e);
+            assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE));
+        }
+    }
+
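+    // Editor's note (added commentary, not part of the original patch): when limit < size the
+    // operator may stop before visiting every document, so the value it reports is only
+    // guaranteed to be greater than or equal to the true minimum; testMin below therefore
+    // asserts exact equality only when size <= limit, and a bound otherwise.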
+    private void testEmpty(Supplier<DriverContext> contexts) {
+        int limit = randomBoolean() ? between(10, 10000) : Integer.MAX_VALUE;
+        testMin(contexts, 0, limit);
+    }
+
+    private void testMin(Supplier<DriverContext> contexts, int size, int limit) {
+        DataPartitioning dataPartitioning = randomFrom(DataPartitioning.values());
+        NumberTypeTest numberTypeTest = getNumberTypeTest();
+        LuceneMinFactory factory = simple(numberTypeTest, dataPartitioning, size, limit);
+        List<Page> results = new CopyOnWriteArrayList<>();
+        List<Driver> drivers = new ArrayList<>();
+        int taskConcurrency = between(1, 8);
+        for (int i = 0; i < taskConcurrency; i++) {
+            DriverContext ctx = contexts.get();
+            drivers.add(new Driver(ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add), () -> {}));
+        }
+        OperatorTestCase.runDriver(drivers);
+        assertThat(results.size(), lessThanOrEqualTo(taskConcurrency));
+
+        try (AggregatorFunction aggregatorFunction = numberTypeTest.newAggregatorFunction(contexts.get())) {
+            for (Page page : results) {
+                assertThat(page.getPositionCount(), is(1)); // one row
+                assertThat(page.getBlockCount(), is(2)); // two blocks
+                numberTypeTest.assertPage(page);
+                aggregatorFunction.addIntermediateInput(page);
+            }
+
+            final Block[] result = new Block[1];
+            try {
+                aggregatorFunction.evaluateFinal(result, 0, contexts.get());
+                if (result[0].areAllValuesNull() == false) {
+                    boolean exactResult = size <= limit;
+                    numberTypeTest.assertMinValue(result[0], exactResult);
+                }
+            } finally {
+                Releasables.close(result);
+            }
+        }
+    }
+
+    @Override
+    protected final Matcher<String> expectedToStringOfSimple() {
+        return matchesRegex("LuceneMinMaxOperator\\[maxPageSize = \\d+, remainingDocs=100]");
+    }
+
+    @Override
+    protected final Matcher<String> expectedDescriptionOfSimple() {
+        return matchesRegex(
+            "LuceneMinOperator\\[type = "
+                + getNumberType().name()
+                + ", dataPartitioning = (DOC|SHARD|SEGMENT), fieldName = "
+                + FIELD_NAME
+                + ", limit = 100]"
+        );
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java
index b126ca8af0e31..4863eea5d5ca3 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java
@@ -45,6 +45,7 @@
 import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.index.mapper.RoutingPathFields;
 import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper;
 import org.hamcrest.Matcher;
 import org.junit.After;
@@ -363,12 +364,12 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens
         final List<IndexableField> fields = new ArrayList<>();
         fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp));
         fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp));
-        final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null);
+        var routingPathFields = new RoutingPathFields(null);
         for (int i = 0; i < dimensions.length; i += 2) {
             if (dimensions[i + 1] instanceof Number n) {
-                builder.addLong(dimensions[i].toString(), n.longValue());
+                routingPathFields.addLong(dimensions[i].toString(), n.longValue());
             } else {
-                builder.addString(dimensions[i].toString(), dimensions[i +
1].toString()); + routingPathFields.addString(dimensions[i].toString(), dimensions[i + 1].toString()); fields.add(new SortedSetDocValuesField(dimensions[i].toString(), new BytesRef(dimensions[i + 1].toString()))); } } @@ -382,7 +383,9 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens } } // Use legacy tsid to make tests easier to understand: - fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildLegacyTsid().toBytesRef())); + fields.add( + new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef()) + ); iw.addDocument(fields); } } diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle index fb47255e8d52e..68c0e8e30f814 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle @@ -31,7 +31,7 @@ def supportedVersion = bwcVersion -> { return bwcVersion.onOrAfter(Version.fromString("8.11.0")) && bwcVersion != VersionProperties.elasticsearchVersion } -BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> def javaRestTest = tasks.register("v${bwcVersion}#javaRestTest", StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle index aa19371685ce1..2c432eb94ebf1 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle +++ b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle @@ -15,6 +15,7 @@ apply plugin: 'elasticsearch.bwc-test' dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) javaRestTestImplementation project(xpackModule('esql:qa:server')) + javaRestTestImplementation project(xpackModule('esql')) } def supportedVersion = bwcVersion -> { @@ -22,7 +23,7 @@ def supportedVersion = bwcVersion -> { return bwcVersion.onOrAfter(Version.fromString("8.13.0")); } -BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index 62391c8ca001a..60eecbb7658b7 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -45,6 +45,10 @@ import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.ENRICH_SOURCE_INDICES; import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; +import static 
org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1;
+import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST;
 import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
@@ -101,16 +105,25 @@ public MultiClusterSpecIT(
     @Override
     protected void shouldSkipTest(String testName) throws IOException {
+        boolean remoteMetadata = testCase.requiredCapabilities.contains(METADATA_FIELDS_REMOTE_TEST.capabilityName());
+        if (remoteMetadata) {
+            // remove the capability from the test to enable it
+            testCase.requiredCapabilities = testCase.requiredCapabilities.stream()
+                .filter(c -> c.equals("metadata_fields_remote_test") == false)
+                .toList();
+        }
         super.shouldSkipTest(testName);
         checkCapabilities(remoteClusterClient(), remoteFeaturesService(), testName, testCase);
-        assumeFalse("can't test with _index metadata", hasIndexMetadata(testCase.query));
+        // Do not run tests including "METADATA _index" unless marked with metadata_fields_remote_test,
+        // because they may produce inconsistent results with multiple clusters.
+        assumeFalse("can't test with _index metadata", (remoteMetadata == false) && hasIndexMetadata(testCase.query));
         assumeTrue(
             "Test " + testName + " is skipped on " + Clusters.oldVersion(),
             isEnabled(testName, instructions, Clusters.oldVersion())
         );
-        assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats"));
-        assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats_v2"));
-        assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("join_planning_v1"));
+        assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName()));
+        assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName()));
+        assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName()));
     }
 
     private TestFeatureService remoteFeaturesService() throws IOException {
@@ -151,6 +164,9 @@ protected RestClient buildClient(Settings settings, HttpHost[] localHosts) throw
         return twoClients(localClient, remoteClient);
     }
 
+    // These indices are used in metadata tests so we want them on remote only for consistency
+    public static final List<String> METADATA_INDICES = List.of("employees", "apps", "ul_logs");
+
     /**
      * Creates a new mock client that dispatches every request to both the local and remote clusters, excluding _bulk and _query requests.
      * - '_bulk' requests are randomly sent to either the local or remote cluster to populate data.
Some spec tests, such as AVG, @@ -166,6 +182,8 @@ static RestClient twoClients(RestClient localClient, RestClient remoteClient) th String endpoint = request.getEndpoint(); if (endpoint.startsWith("/_query")) { return localClient.performRequest(request); + } else if (endpoint.endsWith("/_bulk") && METADATA_INDICES.stream().anyMatch(i -> endpoint.equals("/" + i + "/_bulk"))) { + return remoteClient.performRequest(request); } else if (endpoint.endsWith("/_bulk") && ENRICH_SOURCE_INDICES.stream().noneMatch(i -> endpoint.equals("/" + i + "/_bulk"))) { return bulkClient.performRequest(request); } else { @@ -203,6 +221,9 @@ static Request[] cloneRequests(Request orig, int numClones) throws IOException { return clones; } + /** + * Convert FROM employees ... => FROM *:employees,employees + */ static CsvSpecReader.CsvTestCase convertToRemoteIndices(CsvSpecReader.CsvTestCase testCase) { String query = testCase.query; String[] commands = query.split("\\|"); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec index b8569ead94509..3be3decaf351c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec @@ -716,3 +716,47 @@ FROM employees 2 |1985-10-01T00:00:00.000Z 4 |1985-11-01T00:00:00.000Z ; + +bucketByWeekInString +required_capability: implicit_casting_string_literal_to_temporal_amount +FROM employees +| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" +| STATS hires_per_week = COUNT(*) BY week = BUCKET(hire_date, "1 week") +| SORT week +; + + hires_per_week:long | week:date +2 |1985-02-18T00:00:00.000Z +1 |1985-05-13T00:00:00.000Z +1 |1985-07-08T00:00:00.000Z +1 |1985-09-16T00:00:00.000Z +2 |1985-10-14T00:00:00.000Z +4 |1985-11-18T00:00:00.000Z +; + +bucketByMinuteInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| STATS min = min(@timestamp), max = MAX(@timestamp) BY bucket = BUCKET(@timestamp, "30 minutes") +| SORT min +; + + min:date | max:date | bucket:date +2023-10-23T12:15:03.360Z|2023-10-23T12:27:28.948Z|2023-10-23T12:00:00.000Z +2023-10-23T13:33:34.937Z|2023-10-23T13:55:01.543Z|2023-10-23T13:30:00.000Z +; + +bucketByMonthInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| EVAL adjusted = CASE(TO_LONG(@timestamp) % 2 == 0, @timestamp + 1 month, @timestamp + 2 years) +| STATS c = COUNT(*) BY b = BUCKET(adjusted, "1 month") +| SORT c +; + +c:long |b:date +3 |2025-10-01T00:00:00.000Z +4 |2023-11-01T00:00:00.000Z +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index 237c6a9af197f..7e7c561fac3a5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -1286,3 +1286,108 @@ ROW a = GREATEST(TO_DATETIME("1957-05-23T00:00:00Z"), TO_DATETIME("1958-02-19T00 a:datetime 1958-02-19T00:00:00 ; + +evalDateTruncMonthInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM employees +| SORT hire_date +| EVAL x = date_trunc("1 month", hire_date) +| KEEP emp_no, hire_date, x +| LIMIT 5; + +emp_no:integer | hire_date:date | x:date +10009 | 1985-02-18T00:00:00.000Z | 1985-02-01T00:00:00.000Z +10048 | 1985-02-24T00:00:00.000Z | 
1985-02-01T00:00:00.000Z +10098 | 1985-05-13T00:00:00.000Z | 1985-05-01T00:00:00.000Z +10076 | 1985-07-09T00:00:00.000Z | 1985-07-01T00:00:00.000Z +10061 | 1985-09-17T00:00:00.000Z | 1985-09-01T00:00:00.000Z +; + +evalDateTruncHourInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM employees +| SORT hire_date +| EVAL x = date_trunc("240 hours", hire_date) +| KEEP emp_no, hire_date, x +| LIMIT 5; + +emp_no:integer | hire_date:date | x:date +10009 | 1985-02-18T00:00:00.000Z | 1985-02-11T00:00:00.000Z +10048 | 1985-02-24T00:00:00.000Z | 1985-02-21T00:00:00.000Z +10098 | 1985-05-13T00:00:00.000Z | 1985-05-12T00:00:00.000Z +10076 | 1985-07-09T00:00:00.000Z | 1985-07-01T00:00:00.000Z +10061 | 1985-09-17T00:00:00.000Z | 1985-09-09T00:00:00.000Z +; + +evalDateTruncDayInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| SORT @timestamp ASC +| EVAL t = DATE_TRUNC("1 day", @timestamp) +| KEEP t; + +t:date +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +; + +evalDateTruncMinuteInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| SORT @timestamp ASC +| EVAL t = DATE_TRUNC("1 minute", @timestamp) +| KEEP t; + +t:date +2023-10-23T12:15:00 +2023-10-23T12:27:00 +2023-10-23T13:33:00 +2023-10-23T13:51:00 +2023-10-23T13:52:00 +2023-10-23T13:53:00 +2023-10-23T13:55:00 +; + +evalDateTruncDayInStringNull +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM employees +| WHERE emp_no == 10040 +| EVAL x = date_trunc("1 day", birth_date) +| KEEP emp_no, birth_date, x; + +emp_no:integer | birth_date:date | x:date +10040 | null | null +; + +evalDateTruncYearInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +ROW a = 1 +| EVAL year_hired = DATE_TRUNC("1 year", "1991-06-26T00:00:00.000Z") +; + +a:integer | year_hired:date +1 | 1991-01-01T00:00:00.000Z +; + +filteringWithTemporalAmountInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM employees +| SORT emp_no +| WHERE birth_date < "2024-01-01" - 70 years +| STATS cnt = count(*); + +cnt:long +19 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index 14d811535aafd..a6e1a771374ca 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -4,7 +4,7 @@ // the comments in whatever file the test already lives in. If you have to // write a new test to make an example in the docs then put it in whatever // file matches its "theme" best. Put it next to similar tests. Not here. - + // Also! When Nik originally extracted examples from the docs to make them // testable he didn't spend a lot of time putting the docs into appropriate // files. He just made this one. He didn't put his toys away. 
We'd be better @@ -352,18 +352,18 @@ FROM employees // tag::case-result[] emp_no:integer | languages:integer| type:keyword -10001 | 2 |bilingual -10002 | 5 |polyglot -10003 | 4 |polyglot -10004 | 5 |polyglot -10005 | 1 |monolingual +10001 | 2 |bilingual +10002 | 5 |polyglot +10003 | 4 |polyglot +10004 | 5 |polyglot +10005 | 1 |monolingual // end::case-result[] ; docsCountAll // tag::countAll[] -FROM employees -| STATS count = COUNT(*) BY languages +FROM employees +| STATS count = COUNT(*) BY languages | SORT languages DESC // end::countAll[] ; @@ -371,7 +371,7 @@ FROM employees // tag::countAll-result[] count:long | languages:integer 10 |null -21 |5 +21 |5 18 |4 17 |3 19 |2 @@ -381,8 +381,8 @@ count:long | languages:integer basicGrok // tag::basicGrok[] -ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}""" +ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}""" | KEEP date, ip, email, num // end::basicGrok[] ; @@ -395,8 +395,8 @@ date:keyword | ip:keyword | email:keyword | num:keyword grokWithConversionSuffix // tag::grokWithConversionSuffix[] -ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" +ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" | KEEP date, ip, email, num // end::grokWithConversionSuffix[] ; @@ -409,8 +409,8 @@ date:keyword | ip:keyword | email:keyword | num:integer grokWithToDatetime // tag::grokWithToDatetime[] -ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" +ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" | KEEP date, ip, email, num | EVAL date = TO_DATETIME(date) // end::grokWithToDatetime[] @@ -471,7 +471,7 @@ Tokyo | 100-7014 | null basicDissect // tag::basicDissect[] -ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" +ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" | DISSECT a """%{date} - %{msg} - %{ip}""" | KEEP date, msg, ip // end::basicDissect[] @@ -485,8 +485,8 @@ date:keyword | msg:keyword | ip:keyword dissectWithToDatetime // tag::dissectWithToDatetime[] -ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" -| DISSECT a """%{date} - %{msg} - %{ip}""" +ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" +| DISSECT a """%{date} - %{msg} - %{ip}""" | KEEP date, msg, ip | EVAL date = TO_DATETIME(date) // end::dissectWithToDatetime[] @@ -574,8 +574,8 @@ FROM employees // tag::like-result[] first_name:keyword | last_name:keyword -Ebbe |Callaway -Eberhardt |Terkki +Ebbe |Callaway +Eberhardt |Terkki // end::like-result[] ; @@ -589,7 +589,7 @@ FROM employees // tag::rlike-result[] first_name:keyword | last_name:keyword -Alejandro |McAlpine +Alejandro |McAlpine // end::rlike-result[] ; @@ -660,18 +660,19 @@ FROM sample_data docsBitLength required_capability: fn_bit_length // tag::bitLength[] -FROM employees -| KEEP first_name, last_name -| EVAL fn_bit_length = BIT_LENGTH(first_name) +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length = LENGTH(city), fn_bit_length = BIT_LENGTH(city) // 
end::bitLength[] -| SORT first_name +| SORT city | LIMIT 3 ; // tag::bitLength-result[] -first_name:keyword | last_name:keyword | fn_bit_length:integer -Alejandro |McAlpine |72 -Amabile |Gomatam |56 -Anneke |Preusig |48 +city:keyword | fn_length:integer | fn_bit_length:integer +Agwār | 5 | 48 +Ahmedabad | 9 | 72 +Bangalore | 9 | 72 // end::bitLength-result[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index 61a0ccd4af0c5..592b06107c8b5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -22,7 +22,7 @@ FROM addresses | SORT city.name ; -city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:keyword +city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:keyword Netherlands | Amsterdam | EARTH United States of America | San Francisco | EARTH Japan | Tokyo | EARTH @@ -138,39 +138,39 @@ a:integer | b:integer | c:integer | d:integer | e:integer multipleDuplicateInterleaved1 row a = 1 | eval b = a, c = 1, c = 3, d = b + 1, b = c * 2, c = 2, c = d * c + b | keep a, b, c, d; -a:integer | b:integer | c:integer | d:integer -1 | 6 | 10 | 2 +a:integer | b:integer | c:integer | d:integer +1 | 6 | 10 | 2 ; multipleDuplicateInterleaved2 row a = 1 | eval b = a, c = 1 | eval c = 3, d = b + 1 | eval b = c * 2, c = 2 | eval c = d * c + b | keep a, b, c, d; -a:integer | b:integer | c:integer | d:integer -1 | 6 | 10 | 2 +a:integer | b:integer | c:integer | d:integer +1 | 6 | 10 | 2 ; multipleDuplicateInterleaved3 row a = 1 | eval b = a, c = 1, c = 3 | eval d = b + 1 | eval b = c * 2, c = 2, c = d * c + b | keep a, b, c, d; -a:integer | b:integer | c:integer | d:integer -1 | 6 | 10 | 2 +a:integer | b:integer | c:integer | d:integer +1 | 6 | 10 | 2 ; multipleDuplicateInterleaved4 row a = 1 | eval b = a | eval c = 1 | eval c = 3 | eval d = b + 1 | eval b = c * 2 | eval c = 2 | eval c = d * c + b | keep a, b, c, d; -a:integer | b:integer | c:integer | d:integer -1 | 6 | 10 | 2 +a:integer | b:integer | c:integer | d:integer +1 | 6 | 10 | 2 ; projectEval row x = 1 | keep x | eval a1 = x + 1, a2 = x + 1, a3 = a1 + a2, a1 = a1 + a2; -x:integer | a2:integer | a3:integer | a1:integer -1 | 2 | 4 | 4 +x:integer | a2:integer | a3:integer | a1:integer +1 | 2 | 4 | 4 ; evalNullSort @@ -195,76 +195,76 @@ Uri evalWithIsNullIsNotNull from employees | eval true_bool = null is null, false_bool = null is not null, negated_true = not(null is null), negated_false = not(null is not null) | sort emp_no | limit 1 | keep *true*, *false*, first_name, last_name; -true_bool:boolean | negated_true:boolean | false_bool:boolean | negated_false:boolean | first_name:keyword | last_name:keyword +true_bool:boolean | negated_true:boolean | false_bool:boolean | negated_false:boolean | first_name:keyword | last_name:keyword true | false | false | true | Georgi | Facello ; repetitiveEval -from employees | sort emp_no | keep emp_no | eval sum = emp_no + 1 -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + 
emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + 
emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum 
+ emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| limit 3 +from employees | sort emp_no | keep emp_no | eval sum = emp_no + 1 +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = 
sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = 
sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| limit 3 ; emp_no:i | sum:i 10001 | 3230324 10002 | 3230647 10003 | 3230970 -; +; chainedEvalReusingPreviousValue from employees | sort emp_no | eval x1 = concat(first_name, "."), x2 = concat(x1, "."), x3 = concat(x2, ".") | keep x*, first_name | limit 5; - x1:keyword | x2:keyword | x3:keyword |first_name:keyword -Georgi. |Georgi.. |Georgi... |Georgi -Bezalel. |Bezalel.. |Bezalel... |Bezalel -Parto. |Parto.. |Parto... |Parto -Chirstian. |Chirstian.. |Chirstian... |Chirstian + x1:keyword | x2:keyword | x3:keyword |first_name:keyword +Georgi. |Georgi.. |Georgi... |Georgi +Bezalel. |Bezalel.. |Bezalel... |Bezalel +Parto. |Parto.. |Parto... |Parto +Chirstian. |Chirstian.. |Chirstian... |Chirstian Kyoichi. |Kyoichi.. |Kyoichi... |Kyoichi ; @@ -272,10 +272,10 @@ chainedEvalReusingPreviousValue2 from employees | sort emp_no | eval x1 = concat(first_name, "."), x2 = concat(x1, last_name), x3 = concat(x2, gender) | keep x*, first_name, gender | limit 5; x1:keyword | x2:keyword | x3:keyword |first_name:keyword|gender:keyword -Georgi. |Georgi.Facello |Georgi.FacelloM |Georgi |M -Bezalel. |Bezalel.Simmel |Bezalel.SimmelF |Bezalel |F -Parto. |Parto.Bamford |Parto.BamfordM |Parto |M -Chirstian. |Chirstian.Koblick|Chirstian.KoblickM|Chirstian |M +Georgi. 
|Georgi.Facello |Georgi.FacelloM |Georgi |M +Bezalel. |Bezalel.Simmel |Bezalel.SimmelF |Bezalel |F +Parto. |Parto.Bamford |Parto.BamfordM |Parto |M +Chirstian. |Chirstian.Koblick|Chirstian.KoblickM|Chirstian |M Kyoichi. |Kyoichi.Maliniak |Kyoichi.MaliniakM |Kyoichi |M ; @@ -283,10 +283,10 @@ chainedEvalReusingPreviousValue3 from employees | sort emp_no | eval x1 = concat(first_name, "."), x2 = concat(x1, last_name), x3 = concat(x2, x1) | keep x*, first_name | limit 5; x1:keyword | x2:keyword | x3:keyword |first_name:keyword -Georgi. |Georgi.Facello |Georgi.FacelloGeorgi. |Georgi -Bezalel. |Bezalel.Simmel |Bezalel.SimmelBezalel. |Bezalel -Parto. |Parto.Bamford |Parto.BamfordParto. |Parto -Chirstian. |Chirstian.Koblick|Chirstian.KoblickChirstian.|Chirstian +Georgi. |Georgi.Facello |Georgi.FacelloGeorgi. |Georgi +Bezalel. |Bezalel.Simmel |Bezalel.SimmelBezalel. |Bezalel +Parto. |Parto.Bamford |Parto.BamfordParto. |Parto +Chirstian. |Chirstian.Koblick|Chirstian.KoblickChirstian.|Chirstian Kyoichi. |Kyoichi.Maliniak |Kyoichi.MaliniakKyoichi. |Kyoichi ; @@ -301,7 +301,7 @@ warning:Line 1:88: java.lang.IllegalArgumentException: single-value function enc warning:Line 1:133: evaluation of [round([1.14], [1, 2])] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:133: java.lang.IllegalArgumentException: single-value function encountered multi-value -a:double | b:double | c:double | d: double | e:double | f:double | g:double | h:double +a:double | b:double | c:double | d: double | e:double | f:double | g:double | h:double 1.2 | [2.4, 7.9] | 1.0 | null | 1.0 | null | 1.1 | null ; @@ -356,22 +356,43 @@ FROM sample_data docsLength // tag::length[] -FROM employees -| KEEP first_name, last_name -| EVAL fn_length = LENGTH(first_name) +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length = LENGTH(city) // end::length[] -| SORT first_name +| SORT city | LIMIT 3 ; // tag::length-result[] -first_name:keyword | last_name:keyword | fn_length:integer -Alejandro |McAlpine |9 -Amabile |Gomatam |7 -Anneke |Preusig |6 +city:keyword | fn_length:integer +Agwār | 5 +Ahmedabad | 9 +Bangalore | 9 // end::length-result[] ; +docsByteLength +required_capability: fn_byte_length +// tag::byteLength[] +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length = LENGTH(city), fn_byte_length = BYTE_LENGTH(city) +// end::byteLength[] +| SORT city +| LIMIT 3 +; + +// tag::byteLength-result[] +city:keyword | fn_length:integer | fn_byte_length:integer +Agwār | 5 | 6 +Ahmedabad | 9 | 9 +Bangalore | 9 | 9 +// end::byteLength-result[] +; + docsGettingStartedEvalNoColumnName // tag::gs-eval-no-column-name[] FROM sample_data @@ -407,8 +428,8 @@ FROM employees // tag::eval-result[] first_name:keyword | last_name:keyword | height:double | height_feet:double | height_cm:double Georgi |Facello |2.03 |6.66043 |202.99999999999997 -Bezalel |Simmel |2.08 |6.82448 |208.0 -Parto |Bamford |1.83 |6.004230000000001 |183.0 +Bezalel |Simmel |2.08 |6.82448 |208.0 +Parto |Bamford |1.83 |6.004230000000001 |183.0 // end::eval-result[] ; @@ -423,9 +444,9 @@ FROM employees // tag::evalReplace-result[] first_name:keyword | last_name:keyword | height:double -Georgi |Facello |6.66043 -Bezalel |Simmel |6.82448 -Parto |Bamford |6.004230000000001 +Georgi |Facello |6.66043 +Bezalel |Simmel |6.82448 +Parto |Bamford |6.004230000000001 // end::evalReplace-result[] ; @@ -440,8 +461,8 @@ FROM employees // tag::evalUnnamedColumn-result[] first_name:keyword | last_name:keyword | height:double | height * 
3.281:double -Georgi |Facello |2.03 |6.66043 -Bezalel |Simmel |2.08 |6.82448 +Georgi |Facello |2.03 |6.66043 +Bezalel |Simmel |2.08 |6.82448 Parto |Bamford |1.83 |6.004230000000001 // end::evalUnnamedColumn-result[] ; @@ -524,16 +545,16 @@ FROM employees | KEEP emp_no, salary, sum ; - emp_no:i | salary:i | sum:i --10015 |25324 |35339 --10035 |25945 |35980 --10092 |25976 |36068 --10048 |26436 |36484 --10057 |27215 |37272 --10084 |28035 |38119 --10026 |28336 |38362 --10068 |28941 |39009 --10060 |29175 |39235 + emp_no:i | salary:i | sum:i +-10015 |25324 |35339 +-10035 |25945 |35980 +-10092 |25976 |36068 +-10048 |26436 |36484 +-10057 |27215 |37272 +-10084 |28035 |38119 +-10026 |28336 |38362 +-10068 |28941 |39009 +-10060 |29175 |39235 -10042 |30404 |40446 ; @@ -545,16 +566,16 @@ from employees | limit 10 ; - first_name:keyword | last_name:keyword | salary:integer|ll:keyword|lf:keyword -Mona |Azuma |46595 |A |M -Satosi |Awdeh |50249 |A |S -Brendon |Bernini |33370 |B |B -Breannda |Billingsley |29175 |B |B -Cristinel |Bouloucos |58715 |B |C -Charlene |Brattka |28941 |B |C -Margareta |Bierman |41933 |B |M -Mokhtar |Bernatsky |38992 |B |M -Parto |Bamford |61805 |B |P + first_name:keyword | last_name:keyword | salary:integer|ll:keyword|lf:keyword +Mona |Azuma |46595 |A |M +Satosi |Awdeh |50249 |A |S +Brendon |Bernini |33370 |B |B +Breannda |Billingsley |29175 |B |B +Cristinel |Bouloucos |58715 |B |C +Charlene |Brattka |28941 |B |C +Margareta |Bierman |41933 |B |M +Mokhtar |Bernatsky |38992 |B |M +Parto |Bamford |61805 |B |P Premal |Baek |52833 |B |P ; @@ -568,15 +589,15 @@ from employees | limit 10 ; - fn:keyword | ln:keyword | salary:integer| c:keyword -Mona |Azuma |46595 |AM -Satosi |Awdeh |50249 |AS -Brendon |Bernini |33370 |BB -Breannda |Billingsley |29175 |BB -Cristinel |Bouloucos |58715 |BC -Charlene |Brattka |28941 |BC -Margareta |Bierman |41933 |BM -Mokhtar |Bernatsky |38992 |BM -Parto |Bamford |61805 |BP + fn:keyword | ln:keyword | salary:integer| c:keyword +Mona |Azuma |46595 |AM +Satosi |Awdeh |50249 |AS +Brendon |Bernini |33370 |BB +Breannda |Billingsley |29175 |BB +Cristinel |Bouloucos |58715 |BC +Charlene |Brattka |28941 |BC +Margareta |Bierman |41933 |BM +Mokhtar |Bernatsky |38992 |BM +Parto |Bamford |61805 |BP Premal |Baek |52833 |BP ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-remote.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-remote.csv-spec new file mode 100644 index 0000000000000..4d7ee9b1b5af6 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-remote.csv-spec @@ -0,0 +1,151 @@ +simpleKeep +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort _index desc, emp_no | limit 2 | keep emp_no, _index, _version; + +emp_no:integer |_index:keyword |_version:long +10001 |remote_cluster:employees |1 +10002 |remote_cluster:employees |1 +; + +aliasWithSameName +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort _index desc, emp_no | limit 2 | eval _index = _index, _version = _version | keep emp_no, _index, _version; + +emp_no:integer |_index:keyword |_version:long +10001 |remote_cluster:employees |1 +10002 |remote_cluster:employees |1 +; + +inComparison +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort emp_no | where _index == 
"remote_cluster:employees" | where _version == 1 | keep emp_no | limit 2; + +emp_no:integer +10001 +10002 +; + +metaIndexInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +FROM employees METADATA _index, _id +| STATS max = MAX(emp_no) BY _index | SORT _index; + +max:integer |_index:keyword +10100 |remote_cluster:employees +; + +metaIndexAliasedInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index | eval _i = _index | stats max = max(emp_no) by _i | SORT _i; + +max:integer |_i:keyword +10100 |remote_cluster:employees +; + +metaVersionInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _version | stats min = min(emp_no) by _version; + +min:integer |_version:long +10001 |1 +; + +metaVersionAliasedInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _version | eval _v = _version | stats min = min(emp_no) by _v; + +min:integer |_v:long +10001 |1 +; + +inAggsAndAsGroups +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | stats max = max(_version) by _index | SORT _index; + +max:long |_index:keyword +1 |remote_cluster:employees +; + +inAggsAndAsGroupsAliased +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | eval _i = _index, _v = _version | stats max = max(_v) by _i | SORT _i; + +max:long |_i:keyword +1 |remote_cluster:employees +; + +inFunction +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort emp_no | where length(_index) == length("remote_cluster:employees") | where abs(_version) == 1 | keep emp_no | limit 2; + +emp_no:integer +10001 +10002 +; + +inArithmetics +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | eval i = _version + 2 | stats min = min(emp_no) by i; + +min:integer |i:long +10001 |3 +; + +inSort +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort _version, _index desc, emp_no | keep emp_no, _version, _index | limit 2; + +emp_no:integer |_version:long |_index:keyword +10001 |1 |remote_cluster:employees +10002 |1 |remote_cluster:employees +; + +withMvFunction +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _version | eval i = mv_avg(_version) + 2 | stats min = min(emp_no) by i; + +min:integer |i:double +10001 |3.0 +; + +overwritten +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort emp_no | eval _index = 3, _version = "version" | keep emp_no, _index, _version | limit 3; + +emp_no:integer |_index:integer |_version:keyword +10001 |3 |version +10002 |3 |version +10003 |3 |version +; + +multipleIndices +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +FROM ul_logs, apps METADATA _index, _version +| WHERE id IN (13, 14) AND _version == 1 +| EVAL key = CONCAT(_index, "_", TO_STR(id)) +| SORT id, _index +| KEEP id, _index, _version, key +; + + id:long |_index:keyword |_version:long |key:keyword +13 |remote_cluster:apps |1 |remote_cluster:apps_13 
+13 |remote_cluster:ul_logs |1 |remote_cluster:ul_logs_13 +14 |remote_cluster:apps |1 |remote_cluster:apps_14 +14 |remote_cluster:ul_logs |1 |remote_cluster:ul_logs_14 + +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 448ee57b34c58..ad9de4674f8e1 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -2382,6 +2382,116 @@ max:integer |max_a:integer|min:integer | min_a:integer 74999 |null |25324 | null ; +statsWithAllFiltersFalse +required_capability: per_agg_filtering +from employees +| stats max = max(height.float) where false, + min = min(height.float) where to_string(null) == "abc", + count = count(height.float) where false, + count_distinct = count_distinct(salary) where to_string(null) == "def" +; + +max:double |min:double |count:long |count_distinct:long +null |null |0 |0 +; + +statsWithExpressionsAllFiltersFalse +required_capability: per_agg_filtering +from employees +| stats max = max(height.float + 1) where null, + count = count(height.float) + 2 where false, + mix = min(height.float + 1) + count_distinct(emp_no) + 2 where length(null) == 3 +; + +max:double |count:long |mix:double +null |2 |null +; + +statsWithFalseFilterAndGroup +required_capability: per_agg_filtering +from employees +| stats max = max(height.float + 1) where null, + count = count(height.float) + 2 where false + by job_positions +| sort job_positions +| limit 4 +; + +max:double |count:long |job_positions:keyword +null |2 |Accountant +null |2 |Architect +null |2 |Business Analyst +null |2 |Data Scientist +; + +statsWithFalseFiltersAndGroups +required_capability: per_agg_filtering +from employees +| eval my_length = length(concat(first_name, null)) +| stats count_distinct = count_distinct(height.float + 1) where null, + count = count(height.float) + 2 where false, + values = values(first_name) where my_length > 3 + by job_positions, is_rehired +| sort job_positions, is_rehired +| limit 10 +; + +count_distinct:long |count:long |values:keyword |job_positions:keyword |is_rehired:boolean +0 |2 |null |Accountant |false +0 |2 |null |Accountant |true +0 |2 |null |Accountant |null +0 |2 |null |Architect |false +0 |2 |null |Architect |true +0 |2 |null |Architect |null +0 |2 |null |Business Analyst |false +0 |2 |null |Business Analyst |true +0 |2 |null |Business Analyst |null +0 |2 |null |Data Scientist |false +; + +statsWithMixedFiltersAndGroup +required_capability: per_agg_filtering +from employees +| eval my_length = length(concat(first_name, null)) +| stats count = count(my_length) where false, + values = mv_slice(mv_sort(values(first_name)), 0, 1) + by job_positions +| sort job_positions +| limit 4 +; + +count:long |values:keyword |job_positions:keyword +0 |[Arumugam, Bojan] |Accountant +0 |[Alejandro, Charlene] |Architect +0 |[Basil, Breannda] |Business Analyst +0 |[Berni, Breannda] |Data Scientist +; + +prunedStatsFollowedByStats +from employees +| eval my_length = length(concat(first_name, null)) +| stats count = count(my_length) where false, + values = mv_slice(values(first_name), 0, 1) where my_length > 0 +| stats count_distinct = count_distinct(count) +; + +count_distinct:long +1 +; + +statsWithFalseFiltersFromRow +required_capability: per_agg_filtering +row x = null, a = 1, b = [2,3,4] +| stats c=max(a) where x + by b +; + +c:integer |b:integer +null |2 +null |3 +null |4 +; + 
statsWithBasicExpressionFiltered required_capability: per_agg_filtering from employees @@ -2531,6 +2641,57 @@ c2:l |c2_f:l |m2:i |m2_f:i |c:l 1 |1 |5 |5 |21 ; +commonFilterExtractionWithAliasing +required_capability: per_agg_filtering +from employees +| eval eno = emp_no +| drop emp_no +| stats min_sal = min(salary) where eno <= 10010, + min_hei = min(height) where eno <= 10010 +; + +min_sal:integer |min_hei:double +36174 |1.56 +; + +commonFilterExtractionWithAliasAndOriginal +required_capability: per_agg_filtering +from employees +| eval eno = emp_no +| stats min_sal = min(salary) where eno <= 10010, + min_hei = min(height) where emp_no <= 10010 +; + +// same results as above in commonFilterExtractionWithAliasing +min_sal:integer |min_hei:double +36174 |1.56 +; + +commonFilterExtractionWithAliasAndOriginalNeedingNormalization +required_capability: per_agg_filtering +from employees +| eval eno = emp_no +| stats min_sal = min(salary) where eno <= 10010, + min_hei = min(height) where emp_no <= 10010, + max_hei = max(height) where 10010 >= emp_no +; + +min_sal:integer |min_hei:double |max_hei:double +36174 |1.56 |2.1 +; + +commonFilterExtractionWithAliasAndOriginalNeedingNormalizationAndSimplification +required_capability: per_agg_filtering +from employees +| eval eno = emp_no +| stats min_sal = min(salary) where eno <= 10010, + min_hei = min(height) where not (emp_no > 10010), + max_hei = max(height) where 10010 >= emp_no +; + +min_sal:integer |min_hei:double |max_hei:double +36174 |1.56 |2.1 +; statsByConstantExpressionNoAggs required_capability: fix_stats_by_foldable_expression diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index ba44adb5a85e0..6801e1f4eb404 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -10,6 +10,7 @@ import org.elasticsearch.Build; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Priority; @@ -21,12 +22,16 @@ import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; @@ -35,30 +40,36 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class CrossClustersQueryIT extends AbstractMultiClustersTestCase { - private static final String REMOTE_CLUSTER = "cluster-a"; + private static final String REMOTE_CLUSTER_1 = "cluster-a"; + private static final String REMOTE_CLUSTER_2 = "remote-b"; @Override protected Collection<String> remoteClusterAlias() { - return List.of(REMOTE_CLUSTER); + return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); } @Override protected Map<String, Boolean> skipUnavailableForRemoteClusters() { - return Map.of(REMOTE_CLUSTER, randomBoolean()); + return Map.of(REMOTE_CLUSTER_1, randomBoolean()); } @Override @@ -90,7 +101,7 @@ public void testSuccessfulPathways() { Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); - try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats sum (v)", requestIncludeMeta)) { + try (EsqlQueryResponse resp = runQuery("from logs-*,c*:logs-* | stats sum (v)", requestIncludeMeta)) { List<List<Object>> values = getValuesList(resp); assertThat(values, hasSize(1)); assertThat(values.get(0), equalTo(List.of(330L))); @@ -102,9 +113,9 @@ assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, LOCAL_CLUSTER))); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -128,7 +139,7 @@ assertClusterMetadataInResponse(resp, responseExpectMeta); } - try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats count(*) by tag | sort tag | keep tag", requestIncludeMeta)) { + try (EsqlQueryResponse resp = runQuery("from logs-*,c*:logs-* | stats count(*) by tag | sort tag | keep tag", requestIncludeMeta)) { List<List<Object>> values = getValuesList(resp); assertThat(values, hasSize(2)); assertThat(values.get(0), equalTo(List.of("local"))); @@ -141,9 +152,9 @@ assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, LOCAL_CLUSTER))); - EsqlExecutionInfo.Cluster remoteCluster 
= executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -168,171 +179,695 @@ } } - public void testSearchesWhereMissingIndicesAreSpecified() { - Map<String, Object> testClusterInfo = setupTwoClusters(); + public void testSearchesAgainstNonMatchingIndicesWithLocalOnly() { + Map<String, Object> testClusterInfo = setupClusters(2); + String localIndex = (String) testClusterInfo.get("local.index"); + + { + String q = "FROM nomatch," + localIndex; + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> runQuery(q, false)); + assertThat(e.getDetailedMessage(), containsString("no such index [nomatch]")); + + // MP TODO: am I able to fix this from the field-caps call? Yes, if we detect concrete vs. wildcard expressions in user query + // TODO bug - this does not throw; uncomment this test once https://github.com/elastic/elasticsearch/issues/114495 is fixed + // String limit0 = q + " | LIMIT 0"; + // VerificationException ve = expectThrows(VerificationException.class, () -> runQuery(limit0, false)); + // assertThat(ve.getDetailedMessage(), containsString("No matching indices for [nomatch]")); + } + + { + // no failure since concrete index matches, so wildcard matching is lenient + String q = "FROM nomatch*," + localIndex; + try (EsqlQueryResponse resp = runQuery(q, false)) { + // we are only testing that this does not throw an Exception, so the asserts below are minimal + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, false)) { + // we are only testing that this does not throw an Exception, so the asserts below are minimal + assertThat(resp.columns().size(), greaterThanOrEqualTo(1)); + assertThat(getValuesList(resp).size(), equalTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + } + } + { + String q = "FROM nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, false)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, false)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + } + { + String q = "FROM nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, false)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, false)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch*]")); + } + } + + public void testSearchesAgainstIndicesWithNoMappingsSkipUnavailableTrue() { + int numClusters = 2; + setupClusters(numClusters); + Map<String, String> clusterToEmptyIndexMap = createEmptyIndicesWithNoMappings(numClusters); + setSkipUnavailable(REMOTE_CLUSTER_1, randomBoolean()); + + Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + 
try { + String emptyIndex = clusterToEmptyIndexMap.get(REMOTE_CLUSTER_1); + String q = Strings.format("FROM cluster-a:%s", emptyIndex); + // query without referring to fields should work + { + String limit1 = q + " | LIMIT 1"; + try (EsqlQueryResponse resp = runQuery(limit1, requestIncludeMeta)) { + assertThat(resp.columns().size(), equalTo(1)); + assertThat(resp.columns().get(0).name(), equalTo("<no-fields>")); + assertThat(getValuesList(resp).size(), equalTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of(new ExpectedCluster(REMOTE_CLUSTER_1, emptyIndex, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0)) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), equalTo(1)); + assertThat(resp.columns().get(0).name(), equalTo("<no-fields>")); + assertThat(getValuesList(resp).size(), equalTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of(new ExpectedCluster(REMOTE_CLUSTER_1, emptyIndex, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0)) + ); + } + } + + // query that refers to missing fields should throw: + // "type": "verification_exception", + // "reason": "Found 1 problem\nline 2:7: Unknown column [foo]", + { + String keepQuery = q + " | KEEP foo | LIMIT 100"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(keepQuery, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown column [foo]")); + } + + } finally { + clearSkipUnavailable(); + } + } + + public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableTrue() { + int numClusters = 3; + Map<String, Object> testClusterInfo = setupClusters(numClusters); int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); - int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); + int remote1NumShards = (Integer) testClusterInfo.get("remote.num_shards"); + int remote2NumShards = (Integer) testClusterInfo.get("remote2.num_shards"); + String localIndex = (String) testClusterInfo.get("local.index"); + String remote1Index = (String) testClusterInfo.get("remote.index"); + String remote2Index = (String) testClusterInfo.get("remote2.index"); + + createIndexAliases(numClusters); + setSkipUnavailable(REMOTE_CLUSTER_1, true); + setSkipUnavailable(REMOTE_CLUSTER_2, true); Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); - // since a valid local index was specified, the invalid index on cluster-a does not throw an exception, - // but instead is simply ignored - ensure this is captured in the EsqlExecutionInfo - try (EsqlQueryResponse resp = runQuery("from logs-*,cluster-a:no_such_index | stats sum (v)", requestIncludeMeta)) { - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - List<List<Object>> values = getValuesList(resp); - assertThat(values, hasSize(1)); - assertThat(values.get(0), equalTo(List.of(45L))); + try { + // missing concrete local index is fatal + { + String q = "FROM nomatch,cluster-a:" + randomFrom(remote1Index, 
IDX_ALIAS, FILTERED_IDX_ALIAS); + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + } - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // missing concrete remote index is not fatal when skip_unavailable=true (as long as an index matches on another cluster) + { + String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM %s,cluster-a:nomatch", localIndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, localNumShards), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), greaterThan(0)); + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + ) + ); + } + } - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + // since there is at least one matching index in the query, the missing wildcarded local index is not an error + { + String remoteIndexName = randomFrom(remote1Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = "FROM nomatch*,cluster-a:" + remoteIndexName; + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster( + REMOTE_CLUSTER_1, + remoteIndexName, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, + remote1NumShards + ) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try 
(EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), equalTo(0)); + assertThat(resp.columns().size(), greaterThan(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + // LIMIT 0 searches always have total shards = 0 + new ExpectedCluster(REMOTE_CLUSTER_1, remoteIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0) + ) + ); + } + } - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); // 0 since no matching index, thus no shards to search - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); + // since at least one index of the query matches on some cluster, a wildcarded index on skip_un=true is not an error + { + String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM %s,cluster-a:nomatch*", localIndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, localNumShards), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), greaterThan(0)); + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + ) + ); + } + } - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("logs-*")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), 
lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(localNumShards)); - assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); - } + // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true + { + // with non-matching concrete index + String q = "FROM cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); - // since the remote cluster has a valid index expression, the missing local index is ignored - // make this is captured in the EsqlExecutionInfo - try ( - EsqlQueryResponse resp = runQuery( - "from no_such_index,*:logs-* | stats count(*) by tag | sort tag | keep tag", - requestIncludeMeta - ) - ) { - List> values = getValuesList(resp); - assertThat(values, hasSize(1)); - assertThat(values.get(0), equalTo(List.of("remote"))); + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + } - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true and the + // index was wildcarded + { + // with non-matching wildcard index + String q = "FROM cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + } - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + // an error is thrown if there are no matching indices at all - local with wildcard, remote with concrete + { + String q = "FROM nomatch*,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]")); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(remoteNumShards)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); + String limit0 = q + " | LIMIT 0"; + e = 
expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]")); + } - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("no_such_index")); - // TODO: a follow on PR will change this to throw an Exception when the local cluster requests a concrete index that is missing - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(0)); - assertThat(localCluster.getSuccessfulShards(), equalTo(0)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); - } + // an error is thrown if there are no matching indices at all - local with wildcard, remote with wildcard + { + String q = "FROM nomatch*,cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]")); - // when multiple invalid indices are specified on the remote cluster, both should be ignored and present - // in the index expression of the EsqlExecutionInfo and with an indication that zero shards were searched - try ( - EsqlQueryResponse resp = runQuery( - "FROM no_such_index*,*:no_such_index1,*:no_such_index2,logs-1 | STATS COUNT(*) by tag | SORT tag | KEEP tag", - requestIncludeMeta - ) - ) { - List> values = getValuesList(resp); - assertThat(values, hasSize(1)); - assertThat(values.get(0), equalTo(List.of("local"))); + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]")); + } - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // an error is thrown if there are no matching indices at all - local with concrete, remote with concrete + { + String q = "FROM nomatch,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]")); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]")); + } - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index1,no_such_index2")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), 
lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); + // an error is thrown if there are no matching indices at all - local with concrete, remote with wildcard + { + String q = "FROM nomatch,cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("no_such_index*,logs-1")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(localNumShards)); - assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); + } + + // since cluster-a is skip_unavailable=true and at least one cluster has matching indices, no error is thrown + { + // TODO solve in follow-on PR which does skip_unavailable handling at execution time + // String q = Strings.format("FROM %s,cluster-a:nomatch,cluster-a:%s*", localIndex, remote1Index); + // try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + // assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + // EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + // assertThat(executionInfo.isCrossClusterSearch(), is(true)); + // assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // assertExpectedClustersForMissingIndicesTests(executionInfo, List.of( + // // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + // new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + // new ExpectedCluster(REMOTE_CLUSTER_1, "*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, remote2NumShards) + // )); + // } + + // TODO: handle LIMIT 0 for this case in follow-on PR + // String limit0 = q + " | LIMIT 0"; + // try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + // assertThat(resp.columns().size(), greaterThanOrEqualTo(1)); + // assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + // EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + // assertThat(executionInfo.isCrossClusterSearch(), is(true)); + // assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // assertExpectedClustersForMissingIndicesTests(executionInfo, List.of( + // // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + // new ExpectedCluster(LOCAL_CLUSTER, localIndex, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + // new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch," + remote1Index + "*",
EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + // )); + // } + } + + // tests with three clusters --- + + // since cluster-a is skip_unavailable=true and at least one cluster has matching indices, no error is thrown + // cluster-a should be marked as SKIPPED with a VerificationException + { + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM nomatch*,cluster-a:nomatch,%s:%s", REMOTE_CLUSTER_2, remote2IndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + new ExpectedCluster( + REMOTE_CLUSTER_2, + remote2IndexName, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, + remote2NumShards + ) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), greaterThanOrEqualTo(1)); + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + new ExpectedCluster(REMOTE_CLUSTER_2, remote2IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0) + ) + ); + } + } + + // since cluster-a is skip_unavailable=true and at least one cluster has matching indices, no error is thrown + // cluster-a should be marked as SKIPPED with a "NoMatchingIndicesException" since a wildcard index was requested + { + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM nomatch*,cluster-a:nomatch*,%s:%s", REMOTE_CLUSTER_2, remote2IndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + new ExpectedCluster( + REMOTE_CLUSTER_2, + remote2IndexName, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, +
remote2NumShards + ) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), greaterThanOrEqualTo(1)); + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + new ExpectedCluster(REMOTE_CLUSTER_2, remote2IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0) + ) + ); + } + } + } finally { + clearSkipUnavailable(); } + } - // wildcard on remote cluster that matches nothing - should be present in EsqlExecutionInfo marked as SKIPPED, no shards searched - try (EsqlQueryResponse resp = runQuery("from cluster-a:no_such_index*,logs-* | stats sum (v)", requestIncludeMeta)) { - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - List<List<Object>> values = getValuesList(resp); - assertThat(values, hasSize(1)); - assertThat(values.get(0), equalTo(List.of(45L))); + public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableFalse() { + int numClusters = 3; + Map<String, Object> testClusterInfo = setupClusters(numClusters); + int remote1NumShards = (Integer) testClusterInfo.get("remote.num_shards"); + String localIndex = (String) testClusterInfo.get("local.index"); + String remote1Index = (String) testClusterInfo.get("remote.index"); + String remote2Index = (String) testClusterInfo.get("remote2.index"); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + createIndexAliases(numClusters); + setSkipUnavailable(REMOTE_CLUSTER_1, false); + setSkipUnavailable(REMOTE_CLUSTER_2, false); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index*")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); + try { + // missing concrete local index is an error + { + String q = "FROM nomatch,cluster-a:" + remote1Index; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); +
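Every case in these two tests funnels through the test class's runQuery helper, which this diff does not show. Purely as a hedged sketch (the signature is inferred from the call sites; the body, including the action constant and the 30-second timeout, is an assumption):

```java
// Hypothetical shape of the runQuery(String, Boolean) helper used throughout;
// the real helper lives elsewhere in this test class and may differ.
EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInRequest) {
    EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
    request.query(query);
    if (ccsMetadataInRequest != null) {
        // drives the includeCCSMetadata() values asserted on via getExecutionInfo()
        request.includeCCSMetadata(ccsMetadataInRequest);
    }
    return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS);
}
```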
+ String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + } - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("logs-*")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(localNumShards)); - assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); + // missing concrete remote index is fatal when skip_unavailable=false + { + String q = "FROM logs*,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + } + + // No error since local non-matching has wildcard and the remote cluster matches + { + String remote1IndexName = randomFrom(remote1Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM nomatch*,%s:%s", REMOTE_CLUSTER_1, remote1IndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster( + REMOTE_CLUSTER_1, + remote1IndexName, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, + remote1NumShards + ) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), equalTo(0)); + assertThat(resp.columns().size(), greaterThan(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + // LIMIT 0 searches always have total shards = 0 + new ExpectedCluster(REMOTE_CLUSTER_1, remote1IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0) + ) + ); + } + } + + // query is fatal since cluster-a has skip_unavailable=false and has no matching indices + { + String q = Strings.format("FROM %s,cluster-a:nomatch*", randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS)); +
VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + } + + // an error is thrown if there are no matching indices at all - single remote cluster with concrete index expression + { + String q = "FROM cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + } + + // an error is thrown if there are no matching indices at all - single remote cluster with wildcard index expression + { + String q = "FROM cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + } + + // an error is thrown if there are no matching indices at all - local with wildcard, remote with concrete + { + String q = "FROM nomatch*,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]")); + } + + // an error is thrown if there are no matching indices at all - local with wildcard, remote with wildcard + { + String q = "FROM nomatch*,cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]")); + } + + // an error is thrown if there are no matching indices at all - local with concrete, remote with concrete + { + String q = "FROM nomatch,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]")); + } + + // an error is thrown if there are no matching indices at all - local with concrete, remote with wildcard + { + 
String q = "FROM nomatch,cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); + } + + // Missing concrete index on skip_unavailable=false cluster is a fatal error, even when another index expression + // against that cluster matches + { + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM %s,cluster-a:nomatch,cluster-a:%s*", localIndex, remote2IndexName); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("no such index [nomatch]")); + + // TODO: in follow on PR, add support for throwing a VerificationException from this scenario + // String limit0 = q + " | LIMIT 0"; + // VerificationException e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + // assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); + } + + // --- test against 3 clusters + + // skip_unavailable=false cluster having no matching indices is a fatal error. This error + // is fatal at plan time, so it throws VerificationException, not IndexNotFoundException (thrown at execution time) + { + String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS); + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM %s*,cluster-a:nomatch,%s:%s*", localIndexName, REMOTE_CLUSTER_2, remote2IndexName); + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + } + + // skip_unavailable=false cluster having no matching indices is a fatal error (even if wildcarded) + { + String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS); + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM %s*,cluster-a:nomatch*,%s:%s*", localIndexName, REMOTE_CLUSTER_2, remote2IndexName); + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + } + } finally { + clearSkipUnavailable(); + } + } + + record ExpectedCluster(String clusterAlias, String indexExpression, EsqlExecutionInfo.Cluster.Status status, Integer totalShards) {} + + public void assertExpectedClustersForMissingIndicesTests(EsqlExecutionInfo executionInfo, List expected) { + long overallTookMillis = executionInfo.overallTook().millis(); + 
assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); + + Set<String> expectedClusterAliases = expected.stream().map(c -> c.clusterAlias()).collect(Collectors.toSet()); + assertThat(executionInfo.clusterAliases(), equalTo(expectedClusterAliases)); + + for (ExpectedCluster expectedCluster : expected) { + EsqlExecutionInfo.Cluster cluster = executionInfo.getCluster(expectedCluster.clusterAlias()); + String msg = cluster.getClusterAlias(); + assertThat(msg, cluster.getIndexExpression(), equalTo(expectedCluster.indexExpression())); + assertThat(msg, cluster.getStatus(), equalTo(expectedCluster.status())); + assertThat(msg, cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(msg, cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(msg, cluster.getTotalShards(), equalTo(expectedCluster.totalShards())); + if (cluster.getStatus() == EsqlExecutionInfo.Cluster.Status.SUCCESSFUL) { + assertThat(msg, cluster.getSuccessfulShards(), equalTo(expectedCluster.totalShards())); + assertThat(msg, cluster.getSkippedShards(), equalTo(0)); + } else if (cluster.getStatus() == EsqlExecutionInfo.Cluster.Status.SKIPPED) { + assertThat(msg, cluster.getSuccessfulShards(), equalTo(0)); + assertThat(msg, cluster.getSkippedShards(), equalTo(expectedCluster.totalShards())); + assertThat(msg, cluster.getFailures().size(), equalTo(1)); + assertThat(msg, cluster.getFailures().get(0).getCause(), instanceOf(VerificationException.class)); + String expectedMsg = "Unknown index [" + expectedCluster.indexExpression() + "]"; + assertThat(msg, cluster.getFailures().get(0).getCause().getMessage(), containsString(expectedMsg)); + } + // currently failed shards is always zero - change this once we start allowing partial data for individual shard failures + assertThat(msg, cluster.getFailedShards(), equalTo(0)); } } @@ -359,6 +894,10 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); } + // skip_un must be true for the next test or it will fail on "cluster-a:no_such_index*" with a + // VerificationException because there are no matching indices for that skip_un=false cluster.
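Taken together, the two skip_unavailable tests above pin down a small decision table for non-matching index expressions. As an illustration only (no such helper exists in this patch; all names are invented), the behaviour they assert can be summarized as:

```java
// Illustrative summary of the asserted semantics for a cluster whose index
// expression matched nothing; hypothetical helper, not part of the patch.
static boolean missingIndicesAreFatal(boolean isLocalCluster, boolean skipUnavailable,
                                      boolean wildcardOnly, boolean someOtherClusterMatched) {
    if (someOtherClusterMatched == false) {
        return true; // nothing matched on any cluster: always "Unknown index ..."
    }
    if (isLocalCluster) {
        return wildcardOnly == false; // a missing concrete local index is always fatal
    }
    return skipUnavailable == false; // skip_unavailable=false remotes are fatal, wildcard or not
}
```

A skip_unavailable=true remote that matched nothing is instead recorded as SKIPPED in the EsqlExecutionInfo, which is exactly what assertExpectedClustersForMissingIndicesTests verifies.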
+ setSkipUnavailable(REMOTE_CLUSTER_1, true); + // cluster-foo* matches nothing and so should not be present in the EsqlExecutionInfo try ( EsqlQueryResponse resp = runQuery( @@ -376,9 +915,9 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, LOCAL_CLUSTER))); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -395,6 +934,8 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); assertThat(localCluster.getSkippedShards(), equalTo(0)); assertThat(localCluster.getFailedShards(), equalTo(0)); + } finally { + clearSkipUnavailable(); } } @@ -403,10 +944,12 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { * (which involves cross-cluster field-caps calls), it is a coordinator only operation at query time * which uses a different pathway compared to queries that require data node (and remote data node) operations * at query time. + * + * Note: the tests covering "nonmatching indices" also do LIMIT 0 tests. + * This one mostly focuses on took time values.
*/ public void testCCSExecutionOnSearchesWithLimit0() { setupTwoClusters(); - Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); @@ -427,9 +970,9 @@ public void testCCSExecutionOnSearchesWithLimit0() { long overallTookMillis = executionInfo.overallTook().millis(); assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, LOCAL_CLUSTER))); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -449,66 +992,6 @@ public void testCCSExecutionOnSearchesWithLimit0() { assertThat(remoteCluster.getSkippedShards(), equalTo(0)); assertThat(remoteCluster.getFailedShards(), equalTo(0)); } - - try (EsqlQueryResponse resp = runQuery("FROM logs*,cluster-a:nomatch* | LIMIT 0", requestIncludeMeta)) { - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); - - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("nomatch*")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); - - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("logs*")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(0)); - assertThat(localCluster.getSuccessfulShards(), equalTo(0)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); - } - - try (EsqlQueryResponse resp = runQuery("FROM nomatch*,cluster-a:* | LIMIT 0", requestIncludeMeta)) { - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - 
assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); - - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("*")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); - - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("nomatch*")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); - } } public void testMetadataIndex() { @@ -536,7 +1019,7 @@ public void testMetadataIndex() { assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -571,12 +1054,12 @@ public void testProfile() { .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put("index.routing.rebalance.enable", "none")) .get(); waitForNoInitializingShards(client(LOCAL_CLUSTER), TimeValue.timeValueSeconds(30), "logs-1"); - client(REMOTE_CLUSTER).admin() + client(REMOTE_CLUSTER_1).admin() .indices() .prepareUpdateSettings("logs-2") .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put("index.routing.rebalance.enable", "none")) .get(); - waitForNoInitializingShards(client(REMOTE_CLUSTER), TimeValue.timeValueSeconds(30), "logs-2"); + waitForNoInitializingShards(client(REMOTE_CLUSTER_1), TimeValue.timeValueSeconds(30), "logs-2"); final int localOnlyProfiles; { EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); @@ -593,7 +1076,7 @@ public void testProfile() { EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); assertNotNull(executionInfo); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertNull(remoteCluster); assertThat(executionInfo.isCrossClusterSearch(), is(false)); assertThat(executionInfo.includeCCSMetadata(), is(false)); @@ -621,7 +1104,7 @@ public void testProfile() { assertThat(executionInfo.includeCCSMetadata(), is(false)); 
assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -654,7 +1137,7 @@ public void testProfile() { assertThat(executionInfo.includeCCSMetadata(), is(false)); assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -704,7 +1187,7 @@ public void testWarnings() throws Exception { assertThat(executionInfo.includeCCSMetadata(), is(false)); assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -792,22 +1275,37 @@ void waitForNoInitializingShards(Client client, TimeValue timeout, String... ind } Map<String, Object> setupTwoClusters() { - String localIndex = "logs-1"; + return setupClusters(2); + } + + private static String LOCAL_INDEX = "logs-1"; + private static String IDX_ALIAS = "alias1"; + private static String FILTERED_IDX_ALIAS = "alias-filtered-1"; + private static String REMOTE_INDEX = "logs-2"; + + Map<String, Object> setupClusters(int numClusters) { + assert numClusters == 2 || numClusters == 3 : "only 2 or 3 clusters supported, got: " + numClusters; int numShardsLocal = randomIntBetween(1, 5); - populateLocalIndices(localIndex, numShardsLocal); + populateLocalIndices(LOCAL_INDEX, numShardsLocal); - String remoteIndex = "logs-2"; int numShardsRemote = randomIntBetween(1, 5); - populateRemoteIndices(remoteIndex, numShardsRemote); + populateRemoteIndices(REMOTE_CLUSTER_1, REMOTE_INDEX, numShardsRemote); Map<String, Object> clusterInfo = new HashMap<>(); clusterInfo.put("local.num_shards", numShardsLocal); - clusterInfo.put("local.index", localIndex); + clusterInfo.put("local.index", LOCAL_INDEX); clusterInfo.put("remote.num_shards", numShardsRemote); - clusterInfo.put("remote.index", remoteIndex); + clusterInfo.put("remote.index", REMOTE_INDEX); + + if (numClusters == 3) { + int numShardsRemote2 = randomIntBetween(1, 5); + populateRemoteIndices(REMOTE_CLUSTER_2, REMOTE_INDEX, numShardsRemote2); + clusterInfo.put("remote2.index", REMOTE_INDEX); + clusterInfo.put("remote2.num_shards", numShardsRemote2); + } - String skipUnavailableKey = Strings.format("cluster.remote.%s.skip_unavailable", REMOTE_CLUSTER); - Setting skipUnavailableSetting = cluster(REMOTE_CLUSTER).clusterService().getClusterSettings().get(skipUnavailableKey); + String skipUnavailableKey = Strings.format("cluster.remote.%s.skip_unavailable", REMOTE_CLUSTER_1); + Setting skipUnavailableSetting =
cluster(REMOTE_CLUSTER_1).clusterService().getClusterSettings().get(skipUnavailableKey); boolean skipUnavailable = (boolean) cluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).clusterService() .getClusterSettings() .get(skipUnavailableSetting); @@ -816,6 +1314,98 @@ Map<String, Object> setupTwoClusters() { return clusterInfo; } + /** + * For the local cluster and REMOTE_CLUSTER_1 it creates a standard alias to the index created in populateLocalIndices + * and populateRemoteIndices. It also creates a filtered alias against those indices that looks like: + * PUT /_aliases + * { + * "actions": [ + * { + * "add": { + * "index": "my_index", + * "alias": "my_alias", + * "filter": { + * "terms": { + * "v": [1, 2, 4] + * } + * } + * } + * } + * ] + * } + */ + void createIndexAliases(int numClusters) { + assert numClusters == 2 || numClusters == 3 : "Only 2 or 3 clusters allowed in createIndexAliases"; + + int[] allowed = new int[] { 1, 2, 4 }; + QueryBuilder filterBuilder = new TermsQueryBuilder("v", allowed); + + { + Client localClient = client(LOCAL_CLUSTER); + IndicesAliasesResponse indicesAliasesResponse = localClient.admin() + .indices() + .prepareAliases() + .addAlias(LOCAL_INDEX, IDX_ALIAS) + .addAlias(LOCAL_INDEX, FILTERED_IDX_ALIAS, filterBuilder) + .get(); + assertFalse(indicesAliasesResponse.hasErrors()); + } + { + Client remoteClient = client(REMOTE_CLUSTER_1); + IndicesAliasesResponse indicesAliasesResponse = remoteClient.admin() + .indices() + .prepareAliases() + .addAlias(REMOTE_INDEX, IDX_ALIAS) + .addAlias(REMOTE_INDEX, FILTERED_IDX_ALIAS, filterBuilder) + .get(); + assertFalse(indicesAliasesResponse.hasErrors()); + } + if (numClusters == 3) { + Client remoteClient = client(REMOTE_CLUSTER_2); + IndicesAliasesResponse indicesAliasesResponse = remoteClient.admin() + .indices() + .prepareAliases() + .addAlias(REMOTE_INDEX, IDX_ALIAS) + .addAlias(REMOTE_INDEX, FILTERED_IDX_ALIAS, filterBuilder) + .get(); + assertFalse(indicesAliasesResponse.hasErrors()); + } + } + + Map<String, String> createEmptyIndicesWithNoMappings(int numClusters) { + assert numClusters == 2 || numClusters == 3 : "Only 2 or 3 clusters supported in createEmptyIndicesWithNoMappings"; + + Map<String, String> clusterToEmptyIndexMap = new HashMap<>(); + + String localIndexName = randomAlphaOfLength(14).toLowerCase(Locale.ROOT) + "1"; + clusterToEmptyIndexMap.put(LOCAL_CLUSTER, localIndexName); + Client localClient = client(LOCAL_CLUSTER); + assertAcked( + localClient.admin().indices().prepareCreate(localIndexName).setSettings(Settings.builder().put("index.number_of_shards", 1)) + ); + + String remote1IndexName = randomAlphaOfLength(14).toLowerCase(Locale.ROOT) + "2"; + clusterToEmptyIndexMap.put(REMOTE_CLUSTER_1, remote1IndexName); + Client remote1Client = client(REMOTE_CLUSTER_1); + assertAcked( + remote1Client.admin().indices().prepareCreate(remote1IndexName).setSettings(Settings.builder().put("index.number_of_shards", 1)) + ); + + if (numClusters == 3) { + String remote2IndexName = randomAlphaOfLength(14).toLowerCase(Locale.ROOT) + "3"; + clusterToEmptyIndexMap.put(REMOTE_CLUSTER_2, remote2IndexName); + Client remote2Client = client(REMOTE_CLUSTER_2); + assertAcked( + remote2Client.admin() + .indices() + .prepareCreate(remote2IndexName) + .setSettings(Settings.builder().put("index.number_of_shards", 1)) + ); + } + + return clusterToEmptyIndexMap; + } + void populateLocalIndices(String indexName, int numShards) { Client localClient = client(LOCAL_CLUSTER); assertAcked( @@ -831,8 +1421,8 @@ void populateLocalIndices(String indexName, int numShards) {
localClient.admin().indices().prepareRefresh(indexName).get(); } - void populateRemoteIndices(String indexName, int numShards) { - Client remoteClient = client(REMOTE_CLUSTER); + void populateRemoteIndices(String clusterAlias, String indexName, int numShards) { + Client remoteClient = client(clusterAlias); assertAcked( remoteClient.admin() .indices() @@ -845,4 +1435,23 @@ void populateRemoteIndices(String indexName, int numShards) { } remoteClient.admin().indices().prepareRefresh(indexName).get(); } + + private void setSkipUnavailable(String clusterAlias, boolean skip) { + client(LOCAL_CLUSTER).admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("cluster.remote." + clusterAlias + ".skip_unavailable", skip).build()) + .get(); + } + + private void clearSkipUnavailable() { + Settings.Builder settingsBuilder = Settings.builder() + .putNull("cluster.remote." + REMOTE_CLUSTER_1 + ".skip_unavailable") + .putNull("cluster.remote." + REMOTE_CLUSTER_2 + ".skip_unavailable"); + client(LOCAL_CLUSTER).admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settingsBuilder.build()) + .get(); + } } diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthEvaluator.java new file mode 100644 index 0000000000000..1b0bff92d7d04 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Warnings; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ByteLength}. + * This class is generated. Do not edit it. 
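+ * <p>
+ * The scalar body this evaluator delegates to, {@code ByteLength.process(BytesRef)}, is not part of this
+ * diff; plausibly (an assumption, not confirmed by the patch) it amounts to:
+ * <pre>{@code
+ * static int process(BytesRef val) {
+ *     return val.length; // number of UTF-8 bytes backing the value
+ * }
+ * }</pre>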
+ */ +public final class ByteLengthEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final EvalOperator.ExpressionEvaluator val; + + private final DriverContext driverContext; + + private Warnings warnings; + + public ByteLengthEvaluator(Source source, EvalOperator.ExpressionEvaluator val, + DriverContext driverContext) { + this.source = source; + this.val = val; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) { + BytesRefVector valVector = valBlock.asVector(); + if (valVector == null) { + return eval(page.getPositionCount(), valBlock); + } + return eval(page.getPositionCount(), valVector).asBlock(); + } + } + + public IntBlock eval(int positionCount, BytesRefBlock valBlock) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + BytesRef valScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (valBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (valBlock.getValueCount(p) != 1) { + if (valBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendInt(ByteLength.process(valBlock.getBytesRef(valBlock.getFirstValueIndex(p), valScratch))); + } + return result.build(); + } + } + + public IntVector eval(int positionCount, BytesRefVector valVector) { + try(IntVector.FixedBuilder result = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + BytesRef valScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendInt(p, ByteLength.process(valVector.getBytesRef(p, valScratch))); + } + return result.build(); + } + } + + @Override + public String toString() { + return "ByteLengthEvaluator[" + "val=" + val + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(val); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory val; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val) { + this.source = source; + this.val = val; + } + + @Override + public ByteLengthEvaluator get(DriverContext context) { + return new ByteLengthEvaluator(source, val.get(context), context); + } + + @Override + public String toString() { + return "ByteLengthEvaluator[" + "val=" + val + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index b0111485adbe7..d2bee9c67af5b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -33,6 +33,11 @@ public enum Cap { */ FN_BIT_LENGTH, + /** + * Support for function {@code BYTE_LENGTH}. + */ + FN_BYTE_LENGTH, + /** * Support for function {@code REVERSE}. 
*/ @@ -471,12 +476,20 @@ public enum Cap { ADD_LIMIT_INSIDE_MV_EXPAND, DELAY_DEBUG_FN(Build.current().isSnapshot()), + + /** Capability for remote metadata test */ + METADATA_FIELDS_REMOTE_TEST(false), /** * WIP on Join planning * - Introduce BinaryPlan and co * - Refactor INLINESTATS and LOOKUP as a JOIN block */ - JOIN_PLANNING_V1(Build.current().isSnapshot()); + JOIN_PLANNING_V1(Build.current().isSnapshot()), + + /** + * Support implicit casting from string literal to DATE_PERIOD or TIME_DURATION. + */ + IMPLICIT_CASTING_STRING_LITERAL_TO_TEMPORAL_AMOUNT; private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 9039177e0643d..562d42a94483f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.analysis; import org.elasticsearch.common.logging.HeaderWarning; -import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.compute.data.Block; import org.elasticsearch.logging.Logger; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; @@ -31,7 +30,6 @@ import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -49,8 +47,10 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionDefinition; import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.FoldablesConvertFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; @@ -60,6 +60,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plan.TableIdentifier; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Drop; @@ -85,6 +86,8 @@ import org.elasticsearch.xpack.esql.stats.FeatureMetric; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; +import java.time.Duration; +import java.time.temporal.TemporalAmount; import java.util.ArrayList; import java.util.Arrays; import java.util.BitSet; @@ -106,6 +109,7 @@ import static 
org.elasticsearch.xpack.core.enrich.EnrichPolicy.GEO_MATCH_TYPE; import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.FLOAT; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; @@ -115,9 +119,11 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; +import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; import static org.elasticsearch.xpack.esql.core.type.DataType.isTemporalAmount; import static org.elasticsearch.xpack.esql.stats.FeatureMetric.LIMIT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.maybeParseTemporalAmount; /** * This class is part of the planner. Resolves references (such as variable and index names) and performs implicit casting. @@ -141,9 +147,14 @@ public class Analyzer extends ParameterizedRuleExecutor( "Resolution", + /* + * ImplicitCasting must be before ResolveRefs. Because a reference is created for a Bucket in Aggregate's aggregates, + * resolving this reference before implicit casting may cause this reference to have customMessage=true, it prevents further + * attempts to resolve this reference. + */ + new ImplicitCasting(), new ResolveRefs(), - new ResolveUnionTypes(), // Must be after ResolveRefs, so union types can be found - new ImplicitCasting() + new ResolveUnionTypes() // Must be after ResolveRefs, so union types can be found ); var finish = new Batch<>("Finish Analysis", Limiter.ONCE, new AddImplicitLimit(), new UnionTypesCleanup()); rules = List.of(init, resolution, finish); @@ -951,13 +962,15 @@ private BitSet gatherPreAnalysisMetrics(LogicalPlan plan, BitSet b) { } /** - * Cast string literals in ScalarFunction, EsqlArithmeticOperation, BinaryComparison and In to desired data types. + * Cast string literals in ScalarFunction, EsqlArithmeticOperation, BinaryComparison, In and GroupingFunction to desired data types. * For example, the string literals in the following expressions will be cast implicitly to the field data type on the left hand side. * date > "2024-08-21" * date in ("2024-08-21", "2024-08-22", "2024-08-23") * date = "2024-08-21" + 3 days * ip == "127.0.0.1" * version != "1.0" + * bucket(dateField, "1 month") + * date_trunc("1 minute", dateField) * * If the inputs to Coalesce are mixed numeric types, cast the rest of the numeric field or value to the first numeric data type if * applicable. 
For example, implicit casting converts: @@ -971,15 +984,18 @@ private BitSet gatherPreAnalysisMetrics(LogicalPlan plan, BitSet b) { private static class ImplicitCasting extends ParameterizedRule { @Override public LogicalPlan apply(LogicalPlan plan, AnalyzerContext context) { - return plan.transformExpressionsUp(ScalarFunction.class, e -> ImplicitCasting.cast(e, context.functionRegistry())); + return plan.transformExpressionsUp( + org.elasticsearch.xpack.esql.core.expression.function.Function.class, + e -> ImplicitCasting.cast(e, context.functionRegistry()) + ); } - private static Expression cast(ScalarFunction f, EsqlFunctionRegistry registry) { + private static Expression cast(org.elasticsearch.xpack.esql.core.expression.function.Function f, EsqlFunctionRegistry registry) { if (f instanceof In in) { return processIn(in); } - if (f instanceof EsqlScalarFunction esf) { - return processScalarFunction(esf, registry); + if (f instanceof EsqlScalarFunction || f instanceof GroupingFunction) { // exclude AggregateFunction until it is needed + return processScalarOrGroupingFunction(f, registry); } if (f instanceof EsqlArithmeticOperation || f instanceof BinaryComparison) { return processBinaryOperator((BinaryOperator) f); @@ -987,7 +1003,10 @@ private static Expression cast(ScalarFunction f, EsqlFunctionRegistry registry) return f; } - private static Expression processScalarFunction(EsqlScalarFunction f, EsqlFunctionRegistry registry) { + private static Expression processScalarOrGroupingFunction( + org.elasticsearch.xpack.esql.core.expression.function.Function f, + EsqlFunctionRegistry registry + ) { List args = f.arguments(); List targetDataTypes = registry.getDataTypeForStringLiteralConversion(f.getClass()); if (targetDataTypes == null || targetDataTypes.isEmpty()) { @@ -1010,9 +1029,11 @@ private static Expression processScalarFunction(EsqlScalarFunction f, EsqlFuncti } if (targetDataType != DataType.NULL && targetDataType != DataType.UNSUPPORTED) { Expression e = castStringLiteral(arg, targetDataType); - childrenChanged = true; - newChildren.add(e); - continue; + if (e != arg) { + childrenChanged = true; + newChildren.add(e); + continue; + } } } } else if (dataType.isNumeric() && canCastMixedNumericTypes(f) && castNumericArgs) { @@ -1094,7 +1115,7 @@ private static Expression processIn(In in) { return childrenChanged ? in.replaceChildren(newChildren) : in; } - private static boolean canCastMixedNumericTypes(EsqlScalarFunction f) { + private static boolean canCastMixedNumericTypes(org.elasticsearch.xpack.esql.core.expression.function.Function f) { return f instanceof Coalesce; } @@ -1141,19 +1162,37 @@ private static boolean supportsStringImplicitCasting(DataType type) { return type == DATETIME || type == IP || type == VERSION || type == BOOLEAN; } - public static Expression castStringLiteral(Expression from, DataType target) { + private static UnresolvedAttribute unresolvedAttribute(Expression value, String type, Exception e) { + String message = format( + "Cannot convert string [{}] to [{}], error [{}]", + value.fold(), + type, + (e instanceof ParsingException pe) ? pe.getErrorMessage() : e.getMessage() + ); + return new UnresolvedAttribute(value.source(), String.valueOf(value.fold()), message); + } + + private static Expression castStringLiteralToTemporalAmount(Expression from) { + try { + TemporalAmount result = maybeParseTemporalAmount(from.fold().toString().strip()); + if (result == null) { + return from; + } + DataType target = result instanceof Duration ? 
TIME_DURATION : DATE_PERIOD; + return new Literal(from.source(), result, target); + } catch (Exception e) { + return unresolvedAttribute(from, DATE_PERIOD + " or " + TIME_DURATION, e); + } + } + + private static Expression castStringLiteral(Expression from, DataType target) { assert from.foldable(); try { - Object to = EsqlDataTypeConverter.convert(from.fold(), target); - return new Literal(from.source(), to, target); + return isTemporalAmount(target) + ? castStringLiteralToTemporalAmount(from) + : new Literal(from.source(), EsqlDataTypeConverter.convert(from.fold(), target), target); } catch (Exception e) { - String message = LoggerMessageFormat.format( - "Cannot convert string [{}] to [{}], error [{}]", - from.fold(), - target, - e.getMessage() - ); - return new UnresolvedAttribute(from.source(), String.valueOf(from.fold()), message); + return unresolvedAttribute(from, target.toString(), e); } } } @@ -1226,6 +1265,16 @@ private Expression resolveConvertFunction(AbstractConvertFunction convert, List< if (convert.field() instanceof FieldAttribute fa && fa.field() instanceof InvalidMappedField imf) { HashMap typeResolutions = new HashMap<>(); Set supportedTypes = convert.supportedTypes(); + if (convert instanceof FoldablesConvertFunction fcf) { + // FoldablesConvertFunction does not accept fields as inputs, they only accept constants + String unresolvedMessage = "argument of [" + + fcf.sourceText() + + "] must be a constant, received [" + + Expressions.name(fa) + + "]"; + Expression ua = new UnresolvedAttribute(fa.source(), fa.name(), unresolvedMessage); + return fcf.replaceChildren(Collections.singletonList(ua)); + } imf.types().forEach(type -> { if (supportedTypes.contains(type.widenSmallNumeric())) { TypeResolutionKey key = new TypeResolutionKey(fa.name(), type); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 632f52d163349..d399c826e0bf2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.function.Function; @@ -33,6 +34,7 @@ import org.elasticsearch.xpack.esql.expression.function.fulltext.FullTextFunction; import org.elasticsearch.xpack.esql.expression.function.fulltext.Match; import org.elasticsearch.xpack.esql.expression.function.fulltext.QueryString; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; @@ -56,10 +58,12 @@ import java.util.ArrayList; import java.util.BitSet; import java.util.Collection; +import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Set; 
import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -271,6 +275,7 @@ private static void checkAggregate(LogicalPlan p, Set failures) { r -> failures.add(fail(r, "the rate aggregate[{}] can only be used within the metrics command", r.sourceText())) ); } + checkCategorizeGrouping(agg, failures); } else { p.forEachExpression( GroupingFunction.class, @@ -279,6 +284,74 @@ private static void checkAggregate(LogicalPlan p, Set failures) { } } + /** + * Check CATEGORIZE grouping function usages. + *
<p>
+ * Some of those checks are temporary, until the required syntax or engine changes are implemented. + *
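+ * Illustrative queries (hypothetical field names) in the spirit of the checks below:
+ *     STATS COUNT(*) BY CATEGORIZE(message), host (multiple groupings)
+ *     STATS COUNT(*) BY CONCAT(CATEGORIZE(message), "!") (nested in another expression)
+ *     STATS VALUES(CATEGORIZE(message)) (used within the aggregations)
+ *     STATS VALUES(cat) BY cat = CATEGORIZE(message) (grouping referenced from an aggregation)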
</p>
+ */ + private static void checkCategorizeGrouping(Aggregate agg, Set failures) { + // Forbid CATEGORIZE grouping function with other groupings + if (agg.groupings().size() > 1) { + agg.groupings().forEach(g -> { + g.forEachDown( + Categorize.class, + categorize -> failures.add( + fail(categorize, "cannot use CATEGORIZE grouping function [{}] with multiple groupings", categorize.sourceText()) + ) + ); + }); + } + + // Forbid CATEGORIZE grouping functions not being top level groupings + agg.groupings().forEach(g -> { + // Check all CATEGORIZE but the top level one + Alias.unwrap(g) + .children() + .forEach( + child -> child.forEachDown( + Categorize.class, + c -> failures.add( + fail(c, "CATEGORIZE grouping function [{}] can't be used within other expressions", c.sourceText()) + ) + ) + ); + }); + + // Forbid CATEGORIZE being used in the aggregations + agg.aggregates().forEach(a -> { + a.forEachDown( + Categorize.class, + categorize -> failures.add( + fail(categorize, "cannot use CATEGORIZE grouping function [{}] within the aggregations", categorize.sourceText()) + ) + ); + }); + + // Forbid CATEGORIZE being referenced in the aggregation functions + Map categorizeByAliasId = new HashMap<>(); + agg.groupings().forEach(g -> { + g.forEachDown(Alias.class, alias -> { + if (alias.child() instanceof Categorize categorize) { + categorizeByAliasId.put(alias.id(), categorize); + } + }); + }); + agg.aggregates() + .forEach(a -> a.forEachDown(AggregateFunction.class, aggregate -> aggregate.forEachDown(Attribute.class, attribute -> { + var categorize = categorizeByAliasId.get(attribute.id()); + if (categorize != null) { + failures.add( + fail( + attribute, + "cannot reference CATEGORIZE grouping function [{}] within the aggregations", + attribute.sourceText() + ) + ); + } + }))); + } + private static void checkRateAggregates(Expression expr, int nestedLevel, Set failures) { if (expr instanceof AggregateFunction) { nestedLevel++; @@ -688,7 +761,7 @@ private static void checkFullTextQueryFunctions(LogicalPlan plan, Set f plan, condition, Match.class, - lp -> (lp instanceof Limit == false), + lp -> (lp instanceof Limit == false) && (lp instanceof Aggregate == false), m -> "[" + m.functionName() + "] " + m.functionType(), failures ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java index 2419aa83845a8..286ddbaa29a5b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java @@ -45,6 +45,7 @@ import org.elasticsearch.compute.operator.lookup.MergePositionsOperator; import org.elasticsearch.compute.operator.lookup.QueryList; import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -185,7 +186,7 @@ protected static QueryList termQueryList( return switch (inputDataType) { case IP -> QueryList.ipTermQueryList(field, searchExecutionContext, (BytesRefBlock) block); case DATETIME -> QueryList.dateTermQueryList(field, searchExecutionContext, (LongBlock) block); - default -> QueryList.rawTermQueryList(field, searchExecutionContext, block); + case null, default -> QueryList.rawTermQueryList(field, searchExecutionContext, block); }; } @@ -459,6 +460,10 
@@ abstract static class Request { abstract static class TransportRequest extends org.elasticsearch.transport.TransportRequest implements IndicesRequest { final String sessionId; final ShardId shardId; + /** + * For mixed clusters with nodes <8.14, this will be null. + */ + @Nullable final DataType inputDataType; final Page inputPage; final List extractFields; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index f24a16bb63697..2d85b46e33a8c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -127,9 +127,9 @@ static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) thro TaskId parentTaskId = TaskId.readFromStream(in); String sessionId = in.readString(); ShardId shardId = new ShardId(in); - DataType inputDataType = DataType.fromTypeName( - (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) ? in.readString() : "unknown" - ); + DataType inputDataType = (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) + ? DataType.fromTypeName(in.readString()) + : null; String matchType = in.readString(); String matchField = in.readString(); Page inputPage; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 7a6ff79d79a65..ca02441d2e1ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -118,6 +118,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; import org.elasticsearch.xpack.esql.expression.function.scalar.string.BitLength; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; @@ -159,27 +160,30 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_SHAPE; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_SHAPE; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.IP; -import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; -import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; +import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static 
org.elasticsearch.xpack.esql.core.type.DataType.UNSUPPORTED; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; +import static org.elasticsearch.xpack.esql.core.type.DataType.isString; public class EsqlFunctionRegistry { - private static final Map, List> dataTypesForStringLiteralConversion = new LinkedHashMap<>(); + private static final Map, List> DATA_TYPES_FOR_STRING_LITERAL_CONVERSIONS = new LinkedHashMap<>(); - private static final Map dataTypeCastingPriority; + private static final Map DATA_TYPE_CASTING_PRIORITY; static { List typePriorityList = Arrays.asList( DATETIME, + DATE_PERIOD, + TIME_DURATION, DOUBLE, LONG, INTEGER, @@ -193,9 +197,9 @@ public class EsqlFunctionRegistry { UNSIGNED_LONG, UNSUPPORTED ); - dataTypeCastingPriority = new HashMap<>(); + DATA_TYPE_CASTING_PRIORITY = new HashMap<>(); for (int i = 0; i < typePriorityList.size(); i++) { - dataTypeCastingPriority.put(typePriorityList.get(i), i); + DATA_TYPE_CASTING_PRIORITY.put(typePriorityList.get(i), i); } } @@ -256,7 +260,7 @@ public Collection listFunctions(String pattern) { .collect(toList()); } - private FunctionDefinition[][] functions() { + private static FunctionDefinition[][] functions() { return new FunctionDefinition[][] { // grouping functions new FunctionDefinition[] { def(Bucket.class, Bucket::new, "bucket", "bin"), }, @@ -308,6 +312,7 @@ private FunctionDefinition[][] functions() { // string new FunctionDefinition[] { def(BitLength.class, BitLength::new, "bit_length"), + def(ByteLength.class, ByteLength::new, "byte_length"), def(Concat.class, Concat::new, "concat"), def(EndsWith.class, EndsWith::new, "ends_with"), def(LTrim.class, LTrim::new, "ltrim"), @@ -435,6 +440,11 @@ public static String normalizeName(String name) { } public record ArgSignature(String name, String[] type, String description, boolean optional, DataType targetDataType) { + + public ArgSignature(String name, String[] type, String description, boolean optional) { + this(name, type, description, optional, UNSUPPORTED); + } + @Override public String toString() { return "ArgSignature{" @@ -475,17 +485,24 @@ public List argDescriptions() { } } - public static DataType getTargetType(String[] names) { + /** + * Build a list target data types, which is used by ImplicitCasting to convert string literals to a target data type. + */ + private static DataType getTargetType(String[] names) { List types = new ArrayList<>(); for (String name : names) { - types.add(DataType.fromEs(name)); - } - if (types.contains(KEYWORD) || types.contains(TEXT)) { - return UNSUPPORTED; + DataType type = DataType.fromTypeName(name); + if (type != null && type != UNSUPPORTED) { // A type should not be null or UNSUPPORTED, just a sanity check here + // If the function takes strings as input, there is no need to cast a string literal to it. + // Return UNSUPPORTED means that ImplicitCasting doesn't support this argument, and it will be skipped by ImplicitCasting. + if (isString(type)) { + return UNSUPPORTED; + } + types.add(type); + } } - return types.stream() - .min((dt1, dt2) -> dataTypeCastingPriority.get(dt1).compareTo(dataTypeCastingPriority.get(dt2))) + .min((dt1, dt2) -> DATA_TYPE_CASTING_PRIORITY.get(dt1).compareTo(DATA_TYPE_CASTING_PRIORITY.get(dt2))) .orElse(UNSUPPORTED); } @@ -557,7 +574,7 @@ private void buildDataTypesForStringLiteralConversion(FunctionDefinition[]... 
gr for (FunctionDefinition[] group : groupFunctions) { for (FunctionDefinition def : group) { FunctionDescription signature = description(def); - dataTypesForStringLiteralConversion.put( + DATA_TYPES_FOR_STRING_LITERAL_CONVERSIONS.put( def.clazz(), signature.args().stream().map(EsqlFunctionRegistry.ArgSignature::targetDataType).collect(Collectors.toList()) ); @@ -566,7 +583,7 @@ private void buildDataTypesForStringLiteralConversion(FunctionDefinition[]... gr } public List getDataTypeForStringLiteralConversion(Class clazz) { - return dataTypesForStringLiteralConversion.get(clazz); + return DATA_TYPES_FOR_STRING_LITERAL_CONVERSIONS.get(clazz); } private static class SnapshotFunctionRegistry extends EsqlFunctionRegistry { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index e9ca69055658d..610fe1c5ea000 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -55,6 +55,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; @@ -80,6 +81,7 @@ public static List getNamedWriteables() { entries.add(Acos.ENTRY); entries.add(Asin.ENTRY); entries.add(Atan.ENTRY); + entries.add(ByteLength.ENTRY); entries.add(Cbrt.ENTRY); entries.add(Ceil.ENTRY); entries.add(Cos.ENTRY); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java index 6e2b5bb63532d..8f43a6481db07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java @@ -59,7 +59,8 @@ protected final TypeResolution resolveType() { @Override protected final Map factories() { - // TODO if a union type field is provided as an input, the correct error message is not shown, #112668 is a follow up + // This is used by ResolveUnionTypes, which is expected to be applied to ES fields only + // FoldablesConvertFunction takes only constants as inputs, so this is empty return Map.of(); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/BitLength.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/BitLength.java index 5deb6fa7feba6..ad8b46df29df2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/BitLength.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/BitLength.java @@ -40,6 +40,7 @@ public class BitLength extends UnaryScalarFunction { @FunctionInfo( returnType = "integer", description = "Returns the bit length of a string.", + note = "All strings are in UTF-8, so a single character can use multiple bytes.", examples = @Example(file = "docs", tag = "bitLength") ) public BitLength( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLength.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLength.java new file mode 100644 index 0000000000000..f967b20b8be32 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLength.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; + +public class ByteLength extends UnaryScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "ByteLength", + ByteLength::new + ); + + @FunctionInfo( + returnType = "integer", + description = "Returns the byte length of a string.", + note = "All strings are in UTF-8, so a single character can use multiple bytes.", + examples = @Example(file = "eval", tag = "byteLength") + ) + public ByteLength( + Source source, + @Param( + name = "string", + type = { "keyword", "text" }, + description = "String expression. If `null`, the function returns `null`." + ) Expression field + ) { + super(source, field); + } + + private ByteLength(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public DataType dataType() { + return DataType.INTEGER; + } + + @Override + protected TypeResolution resolveType() { + return childrenResolved() ? 
isString(field(), sourceText(), DEFAULT) : new TypeResolution("Unresolved children"); + } + + @Evaluator + static int process(BytesRef val) { + return val.length; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new ByteLength(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ByteLength::new, field()); + } + + @Override + public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { + return new ByteLengthEvaluator.Factory(source(), toEvaluator.apply(field())); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Length.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Length.java index f4bb7f35cb466..3b442a8583a0a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Length.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Length.java @@ -34,6 +34,7 @@ public class Length extends UnaryScalarFunction { @FunctionInfo( returnType = "integer", description = "Returns the character length of a string.", + note = "All strings are in UTF-8, so a single character can use multiple bytes.", examples = @Example(file = "eval", tag = "length") ) public Length( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java index b2eaefcf09d65..88366bbf9a7c3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java @@ -12,22 +12,37 @@ import java.util.Collections; import java.util.Map; import java.util.Objects; +import java.util.Set; public final class IndexResolution { - public static IndexResolution valid(EsIndex index, Map unavailableClusters) { + /** + * @param index EsIndex encapsulating requested index expression, resolved mappings and index modes from field-caps. + * @param resolvedIndices Set of concrete indices resolved by field-caps. (This information is not always present in the EsIndex). + * @param unavailableClusters Remote clusters that could not be contacted during planning + * @return valid IndexResolution + */ + public static IndexResolution valid( + EsIndex index, + Set resolvedIndices, + Map unavailableClusters + ) { Objects.requireNonNull(index, "index must not be null if it was found"); + Objects.requireNonNull(resolvedIndices, "resolvedIndices must not be null"); Objects.requireNonNull(unavailableClusters, "unavailableClusters must not be null"); - return new IndexResolution(index, null, unavailableClusters); + return new IndexResolution(index, null, resolvedIndices, unavailableClusters); } + /** + * Use this method only if the set of concrete resolved indices is the same as EsIndex#concreteIndices(). 
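+ * Equivalently (illustrative, mirroring the implementation just below): {@code valid(index)} forwards to
+ * {@code valid(index, index.concreteIndices(), Collections.emptyMap())}.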
+ */ public static IndexResolution valid(EsIndex index) { - return valid(index, Collections.emptyMap()); + return valid(index, index.concreteIndices(), Collections.emptyMap()); } public static IndexResolution invalid(String invalid) { Objects.requireNonNull(invalid, "invalid must not be null to signal that the index is invalid"); - return new IndexResolution(null, invalid, Collections.emptyMap()); + return new IndexResolution(null, invalid, Collections.emptySet(), Collections.emptyMap()); } public static IndexResolution notFound(String name) { @@ -39,12 +54,20 @@ public static IndexResolution notFound(String name) { @Nullable private final String invalid; + // all indices found by field-caps + private final Set resolvedIndices; // remote clusters included in the user's index expression that could not be connected to private final Map unavailableClusters; - private IndexResolution(EsIndex index, @Nullable String invalid, Map unavailableClusters) { + private IndexResolution( + EsIndex index, + @Nullable String invalid, + Set resolvedIndices, + Map unavailableClusters + ) { this.index = index; this.invalid = invalid; + this.resolvedIndices = resolvedIndices; this.unavailableClusters = unavailableClusters; } @@ -64,8 +87,8 @@ public EsIndex get() { } /** - * Is the index valid for use with ql? Returns {@code false} if the - * index wasn't found. + * Is the index valid for use with ql? + * @return {@code false} if the index wasn't found. */ public boolean isValid() { return invalid == null; @@ -75,10 +98,17 @@ public boolean isValid() { * @return Map of unavailable clusters (could not be connected to during field-caps query). Key of map is cluster alias, * value is the {@link FieldCapabilitiesFailure} describing the issue. */ - public Map getUnavailableClusters() { + public Map unavailableClusters() { return unavailableClusters; } + /** + * @return all indices found by field-caps (regardless of whether they had any mappings) + */ + public Set resolvedIndices() { + return resolvedIndices; + } + @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -87,16 +117,29 @@ public boolean equals(Object obj) { IndexResolution other = (IndexResolution) obj; return Objects.equals(index, other.index) && Objects.equals(invalid, other.invalid) + && Objects.equals(resolvedIndices, other.resolvedIndices) && Objects.equals(unavailableClusters, other.unavailableClusters); } @Override public int hashCode() { - return Objects.hash(index, invalid, unavailableClusters); + return Objects.hash(index, invalid, resolvedIndices, unavailableClusters); } @Override public String toString() { - return invalid != null ? invalid : index.name(); + return invalid != null + ? 
invalid + : "IndexResolution{" + + "index=" + + index + + ", invalid='" + + invalid + + '\'' + + ", resolvedIndices=" + + resolvedIndices + + ", unavailableClusters=" + + unavailableClusters + + '}'; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java index 44334ff112bad..3da07e9485af7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEmptyRelation; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceStatsFilteredAggWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.InferIsNotNull; import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.InferNonNullAggConstraint; import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.LocalPropagateEmptyRelation; @@ -15,6 +16,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.ReplaceTopNWithLimitAndSort; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.rule.ParameterizedRuleExecutor; +import org.elasticsearch.xpack.esql.rule.Rule; import java.util.ArrayList; import java.util.List; @@ -50,20 +52,31 @@ protected List> batches() { rules.add(local); // TODO: if the local rules haven't touched the tree, the rest of the rules can be skipped rules.addAll(asList(operators(), cleanup())); - replaceRules(rules); - return rules; + return replaceRules(rules); } + @SuppressWarnings("unchecked") private List> replaceRules(List> listOfRules) { - for (Batch batch : listOfRules) { + List> newBatches = new ArrayList<>(listOfRules.size()); + for (var batch : listOfRules) { var rules = batch.rules(); - for (int i = 0; i < rules.length; i++) { - if (rules[i] instanceof PropagateEmptyRelation) { - rules[i] = new LocalPropagateEmptyRelation(); + List> newRules = new ArrayList<>(rules.length); + boolean updated = false; + for (var r : rules) { + if (r instanceof PropagateEmptyRelation) { + newRules.add(new LocalPropagateEmptyRelation()); + updated = true; + } else if (r instanceof ReplaceStatsFilteredAggWithEval) { + // skip it: once a fragment contains an Agg, this can no longer be pruned, which the rule can do + updated = true; + } else { + newRules.add(r); } } + batch = updated ? 
batch.with(newRules.toArray(Rule[]::new)) : batch; + newBatches.add(batch); } - return listOfRules; + return newBatches; } public LogicalPlan localOptimize(LogicalPlan plan) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 77c5a494437ab..5007b011092f0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.CombineProjections; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ConstantFolding; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ConvertStringToByteRef; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ExtractAggregateCommonFilter; import org.elasticsearch.xpack.esql.optimizer.rules.logical.FoldNull; import org.elasticsearch.xpack.esql.optimizer.rules.logical.LiteralsOnTheRight; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PartiallyFoldCase; @@ -46,6 +47,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceLimitAndSortAsTopN; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceOrderByExpressionWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceRegexMatch; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceStatsFilteredAggWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceTrivialTypeConversions; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SetAsOptimized; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SimplifyComparisonsArithmetics; @@ -123,8 +125,9 @@ protected static Batch substitutions() { "Substitutions", Limiter.ONCE, new SubstituteSurrogatePlans(), - // translate filtered expressions into aggregate with filters - can't use surrogate expressions because it was - // retrofitted for constant folding - this needs to be fixed + // Translate filtered expressions into aggregate with filters - can't use surrogate expressions because it was + // retrofitted for constant folding - this needs to be fixed. + // Needs to occur before ReplaceAggregateAggExpressionWithEval, which will update the functions, losing the filter. new SubstituteFilteredExpression(), new RemoveStatsOverride(), // first extract nested expressions inside aggs @@ -169,7 +172,10 @@ protected static Batch operators() { new BooleanFunctionEqualsElimination(), new CombineBinaryComparisons(), new CombineDisjunctions(), + // TODO: bifunction can now (since we now have just one data types set) be pushed into the rule new SimplifyComparisonsArithmetics(DataType::areCompatible), + new ReplaceStatsFilteredAggWithEval(), + new ExtractAggregateCommonFilter(), // prune/elimination new PruneFilters(), new PruneColumns(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ExtractAggregateCommonFilter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ExtractAggregateCommonFilter.java new file mode 100644 index 0000000000000..f00a8103f913e --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ExtractAggregateCommonFilter.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules.logical; + +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.expression.predicate.Predicates.extractCommon; + +/** + * Extract a per-function expression filter applied to all the aggs as a query {@link Filter}, when no groups are provided. + *
<p>
+ * Example: + *
<pre>
+ *         ... | STATS MIN(a) WHERE b > 0, MIN(c) WHERE b > 0 | ...
+ *         =>
+ *         ... | WHERE b > 0 | STATS MIN(a), MIN(c) | ...
+     </pre>
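+ * Only the shared conjunct is hoisted: per-aggregate residue stays on its function, and if any aggregate carries no filter
+ * the rule leaves the plan untouched. A further illustrative case:
+ *         ... | STATS MIN(a) WHERE b > 0 AND c > 0, MIN(d) WHERE b > 0 | ...
+ *         =>
+ *         ... | WHERE b > 0 | STATS MIN(a) WHERE c > 0, MIN(d) | ...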
+ */ +public final class ExtractAggregateCommonFilter extends OptimizerRules.OptimizerRule { + public ExtractAggregateCommonFilter() { + super(OptimizerRules.TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(Aggregate aggregate) { + if (aggregate.groupings().isEmpty() == false) { + return aggregate; // no optimization for grouped stats + } + + // collect all filters from the agg functions + List filters = new ArrayList<>(aggregate.aggregates().size()); + for (NamedExpression ne : aggregate.aggregates()) { + if (ne instanceof Alias alias && alias.child() instanceof AggregateFunction aggFunction && aggFunction.hasFilter()) { + filters.add(aggFunction.filter()); + } else { + return aggregate; // (at least one) agg function has no filter -- skip optimization + } + } + + // extract common filters + var common = extractCommon(filters); + if (common.v1() == null) { // no common filter + return aggregate; + } + + // replace agg functions' filters with trimmed ones + var newFilters = common.v2(); + List newAggs = new ArrayList<>(aggregate.aggregates().size()); + for (int i = 0; i < aggregate.aggregates().size(); i++) { + var alias = (Alias) aggregate.aggregates().get(i); + var newChild = ((AggregateFunction) alias.child()).withFilter(newFilters.get(i)); + newAggs.add(alias.replaceChild(newChild)); + } + + // build the new agg on top of extracted filter + return new Aggregate( + aggregate.source(), + new Filter(aggregate.source(), aggregate.child(), common.v1()), + aggregate.aggregateType(), + aggregate.groupings(), + newAggs + ); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsFilteredAggWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsFilteredAggWithEval.java new file mode 100644 index 0000000000000..2cafcc2e07052 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsFilteredAggWithEval.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.logical; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; + +import java.util.ArrayList; +import java.util.List; + +/** + * Replaces an aggregation function having a false/null filter with an EVAL node. + *
+ *     ... | STATS x = someAgg(y) WHERE FALSE {BY z} | ...
+ *     =>
+     ... | STATS {BY z} | EVAL x = NULL | KEEP x{, z} | ...
+ * 
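+ * As the implementation below shows, COUNT and COUNT_DISTINCT fold to 0 rather than NULL (their value on empty input), and
+ * when every aggregate is pruned and no grouping remains, the whole STATS collapses into a one-row constant relation:
+ *     ... | STATS c = COUNT(y) WHERE FALSE | ...
+ *     =>
+ *     a single-row local source with c = 0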
+ */ +public class ReplaceStatsFilteredAggWithEval extends OptimizerRules.OptimizerRule { + @Override + protected LogicalPlan rule(Aggregate aggregate) { + int oldAggSize = aggregate.aggregates().size(); + List newAggs = new ArrayList<>(oldAggSize); + List newEvals = new ArrayList<>(oldAggSize); + List newProjections = new ArrayList<>(oldAggSize); + + for (var ne : aggregate.aggregates()) { + if (ne instanceof Alias alias + && alias.child() instanceof AggregateFunction aggFunction + && aggFunction.hasFilter() + && aggFunction.filter() instanceof Literal literal + && Boolean.FALSE.equals(literal.fold())) { + + Object value = aggFunction instanceof Count || aggFunction instanceof CountDistinct ? 0L : null; + Alias newAlias = alias.replaceChild(Literal.of(aggFunction, value)); + newEvals.add(newAlias); + newProjections.add(newAlias.toAttribute()); + } else { + newAggs.add(ne); // agg function unchanged or grouping key + newProjections.add(ne.toAttribute()); + } + } + + LogicalPlan plan = aggregate; + if (newEvals.isEmpty() == false) { + if (newAggs.isEmpty()) { // the Aggregate node is pruned + plan = localRelation(aggregate.source(), newEvals); + } else { + plan = aggregate.with(aggregate.child(), aggregate.groupings(), newAggs); + plan = new Eval(aggregate.source(), plan, newEvals); + plan = new Project(aggregate.source(), plan, newProjections); + } + } + return plan; + } + + private static LocalRelation localRelation(Source source, List newEvals) { + Block[] blocks = new Block[newEvals.size()]; + List attributes = new ArrayList<>(newEvals.size()); + for (int i = 0; i < newEvals.size(); i++) { + Alias alias = newEvals.get(i); + attributes.add(alias.toAttribute()); + blocks[i] = BlockUtils.constantBlock(PlannerUtils.NON_BREAKING_BLOCK_FACTORY, ((Literal) alias.child()).value(), 1); + } + return new LocalRelation(source, attributes, LocalSupplier.of(blocks)); + + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index ffad379001ed0..76de337ded5c6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -251,19 +251,17 @@ private static void updateExecutionInfoAfterCoordinatorOnlyQuery(EsqlExecutionIn if (execInfo.isCrossClusterSearch()) { assert execInfo.planningTookTime() != null : "Planning took time should be set on EsqlExecutionInfo but is null"; for (String clusterAlias : execInfo.clusterAliases()) { - // took time and shard counts for SKIPPED clusters were added at end of planning, so only update other cases here - if (execInfo.getCluster(clusterAlias).getStatus() != EsqlExecutionInfo.Cluster.Status.SKIPPED) { - execInfo.swapCluster( - clusterAlias, - (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setTook(execInfo.overallTook()) - .setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL) - .setTotalShards(0) - .setSuccessfulShards(0) - .setSkippedShards(0) - .setFailedShards(0) - .build() - ); - } + execInfo.swapCluster(clusterAlias, (k, v) -> { + var builder = new EsqlExecutionInfo.Cluster.Builder(v).setTook(execInfo.overallTook()) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0); + if (v.getStatus() == EsqlExecutionInfo.Cluster.Status.RUNNING) { + builder.setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL); + } + return builder.build(); + }); } } } 
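Note on the batch plumbing: the copy-on-write swap in LocalLogicalPlanOptimizer.replaceRules (earlier in this diff) is what the new Batch#with accessor below enables. A minimal sketch of the pattern, with simplified generics but the same rule names:

    List<Rule<?, LogicalPlan>> kept = new ArrayList<>();
    for (var rule : batch.rules()) {
        if (rule instanceof PropagateEmptyRelation) {
            kept.add(new LocalPropagateEmptyRelation()); // swap in the local variant
        } else if (rule instanceof ReplaceStatsFilteredAggWithEval == false) {
            kept.add(rule); // drop only the filtered-agg rewrite; a local fragment containing an Agg can no longer be pruned
        }
    }
    batch = batch.with(kept.toArray(Rule[]::new)); // with() builds a fresh Batch; the shared rules array is not mutated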
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/rule/RuleExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/rule/RuleExecutor.java index 3d73c0d45e9a0..7df5a029d724e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/rule/RuleExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/rule/RuleExecutor.java @@ -68,6 +68,10 @@ public String name() { return name; } + public Batch with(Rule[] rules) { + return new Batch<>(name, limit, rules); + } + public Rule[] rules() { return rules; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 504689fdac39b..c576d15f92608 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -309,7 +309,7 @@ private void preAnalyze( // resolution to updateExecutionInfo if (indexResolution.isValid()) { EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); - EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.getUnavailableClusters()); + EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.unavailableClusters()); if (executionInfo.isCrossClusterSearch() && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { // for a CCS, if all clusters have been marked as SKIPPED, nothing to search so send a sentinel diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java index 80709d8f6c4f7..4fe2fef7e3f45 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java @@ -17,6 +17,7 @@ import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.index.IndexResolution; @@ -33,7 +34,6 @@ class EsqlSessionCCSUtils { private EsqlSessionCCSUtils() {} - // visible for testing static Map determineUnavailableRemoteClusters(List failures) { Map unavailableRemotes = new HashMap<>(); for (FieldCapabilitiesFailure failure : failures) { @@ -75,10 +75,10 @@ public void onFailure(Exception e) { /** * Whether to return an empty result (HTTP status 200) for a CCS rather than a top level 4xx/5xx error. - * + *
<p>
* For cases where field-caps had no indices to search and the remotes were unavailable, we * return an empty successful response (200) if all remotes are marked with skip_unavailable=true. - * + *
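+ * Illustrative (hypothetical names): {@code FROM remote1:logs*,remote2:logs*} where neither remote can be contacted and both
+ * are configured skip_unavailable=true yields a 200 response with zero rows and both clusters marked SKIPPED.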
<p>
* Note: a follow-on PR will expand this logic to handle cases where no indices could be found to match * on any of the requested clusters. */ @@ -132,7 +132,6 @@ static void updateExecutionInfoToReturnEmptyResult(EsqlExecutionInfo executionIn } } - // visible for testing static String createIndexExpressionFromAvailableClusters(EsqlExecutionInfo executionInfo) { StringBuilder sb = new StringBuilder(); for (String clusterAlias : executionInfo.clusterAliases()) { @@ -181,39 +180,91 @@ static void updateExecutionInfoWithUnavailableClusters(EsqlExecutionInfo execInf } } - // visible for testing static void updateExecutionInfoWithClustersWithNoMatchingIndices(EsqlExecutionInfo executionInfo, IndexResolution indexResolution) { Set clustersWithResolvedIndices = new HashSet<>(); // determine missing clusters - for (String indexName : indexResolution.get().indexNameWithModes().keySet()) { + for (String indexName : indexResolution.resolvedIndices()) { clustersWithResolvedIndices.add(RemoteClusterAware.parseClusterAlias(indexName)); } Set clustersRequested = executionInfo.clusterAliases(); Set clustersWithNoMatchingIndices = Sets.difference(clustersRequested, clustersWithResolvedIndices); - clustersWithNoMatchingIndices.removeAll(indexResolution.getUnavailableClusters().keySet()); + clustersWithNoMatchingIndices.removeAll(indexResolution.unavailableClusters().keySet()); + + /** + * Rules enforced at planning time around non-matching indices + * P1. fail query if no matching indices on any cluster (VerificationException) - that is handled elsewhere (TODO: document where) + * P2. fail query if a skip_unavailable:false cluster has no matching indices (the local cluster already has this rule + * enforced at planning time) + * P3. fail query if the local cluster has no matching indices and a concrete index was specified + */ + String fatalErrorMessage = null; /* * These are clusters in the original request that are not present in the field-caps response. They were - * specified with an index or indices that do not exist, so the search on that cluster is done. + * specified with an index expression matched no indices, so the search on that cluster is done. * Mark it as SKIPPED with 0 shards searched and took=0. */ for (String c : clustersWithNoMatchingIndices) { - // TODO: in a follow-on PR, throw a Verification(400 status code) for local and remotes with skip_unavailable=false if - // they were requested with one or more concrete indices - // for now we never mark the local cluster as SKIPPED - final var status = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(c) - ? EsqlExecutionInfo.Cluster.Status.SUCCESSFUL - : EsqlExecutionInfo.Cluster.Status.SKIPPED; - executionInfo.swapCluster( - c, - (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(status) - .setTook(new TimeValue(0)) - .setTotalShards(0) - .setSuccessfulShards(0) - .setSkippedShards(0) - .setFailedShards(0) - .build() - ); + final String indexExpression = executionInfo.getCluster(c).getIndexExpression(); + if (missingIndicesIsFatal(c, executionInfo)) { + String error = Strings.format( + "Unknown index [%s]", + (c.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) ? 
indexExpression : c + ":" + indexExpression) + ); + if (fatalErrorMessage == null) { + fatalErrorMessage = error; + } else { + fatalErrorMessage += "; " + error; + } + } else { + // handles local cluster (when no concrete indices requested) and skip_unavailable=true clusters + EsqlExecutionInfo.Cluster.Status status; + ShardSearchFailure failure; + if (c.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + status = EsqlExecutionInfo.Cluster.Status.SUCCESSFUL; + failure = null; + } else { + status = EsqlExecutionInfo.Cluster.Status.SKIPPED; + failure = new ShardSearchFailure(new VerificationException("Unknown index [" + indexExpression + "]")); + } + executionInfo.swapCluster(c, (k, v) -> { + var builder = new EsqlExecutionInfo.Cluster.Builder(v).setStatus(status) + .setTook(new TimeValue(0)) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0); + if (failure != null) { + builder.setFailures(List.of(failure)); + } + return builder.build(); + }); + } } + if (fatalErrorMessage != null) { + throw new VerificationException(fatalErrorMessage); + } + } + + // visible for testing + static boolean missingIndicesIsFatal(String clusterAlias, EsqlExecutionInfo executionInfo) { + // missing indices on local cluster is fatal only if a concrete index requested + if (clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + return concreteIndexRequested(executionInfo.getCluster(clusterAlias).getIndexExpression()); + } + return executionInfo.getCluster(clusterAlias).isSkipUnavailable() == false; + } + + private static boolean concreteIndexRequested(String indexExpression) { + for (String expr : indexExpression.split(",")) { + if (expr.charAt(0) == '<' || expr.startsWith("-<")) { + // skip date math expressions + continue; + } + if (expr.indexOf('*') < 0) { + return true; + } + } + return false; } // visible for testing diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java index 210f991306bac..0be8cf820d345 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.session; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; @@ -143,21 +144,24 @@ public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResp fields.put(name, field); } + Map unavailableRemotes = EsqlSessionCCSUtils.determineUnavailableRemoteClusters( + fieldCapsResponse.getFailures() + ); + + Map concreteIndices = Maps.newMapWithExpectedSize(fieldCapsResponse.getIndexResponses().size()); + for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { + concreteIndices.put(ir.getIndexName(), ir.getIndexMode()); + } + boolean allEmpty = true; for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { allEmpty &= ir.get().isEmpty(); } if (allEmpty) { // If all the mappings are empty we return an empty set of resolved indices to line up with QL - return IndexResolution.valid(new EsIndex(indexPattern, rootFields, Map.of())); - } - - Map 
concreteIndices = Maps.newMapWithExpectedSize(fieldCapsResponse.getIndexResponses().size()); - for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { - concreteIndices.put(ir.getIndexName(), ir.getIndexMode()); + return IndexResolution.valid(new EsIndex(indexPattern, rootFields, Map.of()), concreteIndices.keySet(), unavailableRemotes); } - EsIndex esIndex = new EsIndex(indexPattern, rootFields, concreteIndices); - return IndexResolution.valid(esIndex, EsqlSessionCCSUtils.determineUnavailableRemoteClusters(fieldCapsResponse.getFailures())); + return IndexResolution.valid(new EsIndex(indexPattern, rootFields, concreteIndices), concreteIndices.keySet(), unavailableRemotes); } private boolean allNested(List caps) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index c9c292769b570..4bfc9ac5d848f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -274,27 +274,11 @@ public static TemporalAmount parseTemporalAmount(Object val, DataType expectedTy return null; } StringBuilder value = new StringBuilder(); - StringBuilder qualifier = new StringBuilder(); - StringBuilder nextBuffer = value; - boolean lastWasSpace = false; - for (char c : str.trim().toCharArray()) { - if (c == ' ') { - if (lastWasSpace == false) { - nextBuffer = nextBuffer == value ? qualifier : null; - } - lastWasSpace = true; - continue; - } - if (nextBuffer == null) { - throw new ParsingException(Source.EMPTY, errorMessage, val, expectedType); - } - nextBuffer.append(c); - lastWasSpace = false; - } - - if ((value.isEmpty() || qualifier.isEmpty()) == false) { + StringBuilder temporalUnit = new StringBuilder(); + separateValueAndTemporalUnitForTemporalAmount(str.strip(), value, temporalUnit, errorMessage, expectedType.toString()); + if ((value.isEmpty() || temporalUnit.isEmpty()) == false) { try { - TemporalAmount result = parseTemporalAmount(Integer.parseInt(value.toString()), qualifier.toString(), Source.EMPTY); + TemporalAmount result = parseTemporalAmount(Integer.parseInt(value.toString()), temporalUnit.toString(), Source.EMPTY); if (DataType.DATE_PERIOD == expectedType && result instanceof Period || DataType.TIME_DURATION == expectedType && result instanceof Duration) { return result; @@ -312,6 +296,48 @@ public static TemporalAmount parseTemporalAmount(Object val, DataType expectedTy throw new ParsingException(Source.EMPTY, errorMessage, val, expectedType); } + public static TemporalAmount maybeParseTemporalAmount(String str) { + // The string literal can be either Date_Period or Time_Duration, derive the data type from its temporal unit + String errorMessage = "Cannot parse [{}] to {}"; + String expectedTypes = DATE_PERIOD + " or " + TIME_DURATION; + StringBuilder value = new StringBuilder(); + StringBuilder temporalUnit = new StringBuilder(); + separateValueAndTemporalUnitForTemporalAmount(str, value, temporalUnit, errorMessage, expectedTypes); + if ((value.isEmpty() || temporalUnit.isEmpty()) == false) { + try { + return parseTemporalAmount(Integer.parseInt(value.toString()), temporalUnit.toString(), Source.EMPTY); + } catch (NumberFormatException ex) { + throw new ParsingException(Source.EMPTY, errorMessage, str, expectedTypes); + } + } + return null; + } + + private static void 
separateValueAndTemporalUnitForTemporalAmount( + String temporalAmount, + StringBuilder value, + StringBuilder temporalUnit, + String errorMessage, + String expectedType + ) { + StringBuilder nextBuffer = value; + boolean lastWasSpace = false; + for (char c : temporalAmount.toCharArray()) { + if (c == ' ') { + if (lastWasSpace == false) { + nextBuffer = nextBuffer == value ? temporalUnit : null; + } + lastWasSpace = true; + continue; + } + if (nextBuffer == null) { + throw new ParsingException(Source.EMPTY, errorMessage, temporalAmount, expectedType); + } + nextBuffer.append(c); + lastWasSpace = false; + } + } + /** * Converts arbitrary object to the desired data type. *

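The helper factored out above has a small contract: split a temporal string literal such as "1 day" into a numeric value and a temporal unit, switching buffers at the first run of spaces and rejecting any third token. Below is a minimal standalone sketch of that contract, with a hypothetical class name and a deliberately abbreviated unit table (the authoritative abbreviation list is the INTERVALS switch in the next hunk); it is an illustration, not the ES|QL implementation.

import java.time.Duration;
import java.time.Period;
import java.time.temporal.TemporalAmount;
import java.util.Locale;

final class TemporalAmountSketch {
    // "1 day" -> Period.ofDays(1); "20 minutes" -> Duration.ofMinutes(20).
    // Anything other than exactly "<value> <unit>" is rejected, mirroring the
    // buffer-switching loop in separateValueAndTemporalUnitForTemporalAmount.
    static TemporalAmount parse(String input) {
        String[] parts = input.strip().split("\\s+");
        if (parts.length != 2) {
            throw new IllegalArgumentException("Cannot parse [" + input + "]");
        }
        long value;
        try {
            value = Long.parseLong(parts[0]);
        } catch (NumberFormatException nfe) {
            // covers inputs like "minutes 1" or "1.5 days", which the tests reject
            throw new IllegalArgumentException("Cannot parse [" + input + "]");
        }
        return switch (parts[1].toLowerCase(Locale.ROOT)) {
            // sub-day units produce a java.time.Duration (TIME_DURATION)
            case "millisecond", "milliseconds", "ms" -> Duration.ofMillis(value);
            case "second", "seconds", "sec", "s" -> Duration.ofSeconds(value);
            case "minute", "minutes", "min" -> Duration.ofMinutes(value);
            case "hour", "hours", "h" -> Duration.ofHours(value);
            // day-and-larger units produce a java.time.Period (DATE_PERIOD)
            case "day", "days", "d" -> Period.ofDays(Math.toIntExact(value));
            case "week", "weeks", "w" -> Period.ofWeeks(Math.toIntExact(value));
            case "month", "months", "mo" -> Period.ofMonths(Math.toIntExact(value));
            case "year", "years", "yr", "y" -> Period.ofYears(Math.toIntExact(value));
            default -> throw new IllegalArgumentException("Unexpected temporal unit: '" + parts[1] + "'");
        };
    }
}

This split is also why maybeParseTemporalAmount can derive the data type from the unit alone: a Duration result implies TIME_DURATION and a Period result implies DATE_PERIOD, the same instanceof check that the typed parseTemporalAmount path keeps above.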
@@ -394,10 +420,10 @@ public static DataType commonType(DataType left, DataType right) { } // generally supporting abbreviations from https://en.wikipedia.org/wiki/Unit_of_time - public static TemporalAmount parseTemporalAmount(Number value, String qualifier, Source source) throws InvalidArgumentException, + public static TemporalAmount parseTemporalAmount(Number value, String temporalUnit, Source source) throws InvalidArgumentException, ArithmeticException, ParsingException { try { - return switch (INTERVALS.valueOf(qualifier.toUpperCase(Locale.ROOT))) { + return switch (INTERVALS.valueOf(temporalUnit.toUpperCase(Locale.ROOT))) { case MILLISECOND, MILLISECONDS, MS -> Duration.ofMillis(safeToLong(value)); case SECOND, SECONDS, SEC, S -> Duration.ofSeconds(safeToLong(value)); case MINUTE, MINUTES, MIN -> Duration.ofMinutes(safeToLong(value)); @@ -410,7 +436,7 @@ public static TemporalAmount parseTemporalAmount(Number value, String qualifier, case YEAR, YEARS, YR, Y -> Period.ofYears(safeToInt(safeToLong(value))); }; } catch (IllegalArgumentException e) { - throw new ParsingException(source, "Unexpected time interval qualifier: '{}'", qualifier); + throw new ParsingException(source, "Unexpected temporal unit: '{}'", temporalUnit); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index d6cda4a3a9ff7..8b364a603405c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -244,6 +244,34 @@ public void testUnsupportedAndMultiTypedFields() { + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | where multi_typed is not null", analyzer) ); + + for (String functionName : List.of("to_timeduration", "to_dateperiod")) { + String lineNumber = functionName.equalsIgnoreCase("to_timeduration") ? "47" : "45"; + String errorType = functionName.equalsIgnoreCase("to_timeduration") ? 
"time_duration" : "date_period"; + assertEquals( + "1:" + lineNumber + ": Cannot use field [unsupported] with unsupported type [flattened]", + error("from test* | eval x = now() + " + functionName + "(unsupported)", analyzer) + ); + assertEquals( + "1:" + lineNumber + ": argument of [" + functionName + "(multi_typed)] must be a constant, received [multi_typed]", + error("from test* | eval x = now() + " + functionName + "(multi_typed)", analyzer) + ); + assertThat( + error("from test* | eval x = unsupported, y = now() + " + functionName + "(x)", analyzer), + containsString("1:23: Cannot use field [unsupported] with unsupported type [flattened]") + ); + assertThat( + error("from test* | eval x = multi_typed, y = now() + " + functionName + "(x)", analyzer), + containsString( + "1:48: argument of [" + + functionName + + "(x)] must be [" + + errorType + + " or string], " + + "found value [x] type [unsupported]" + ) + ); + } } public void testRoundFunctionInvalidInputs() { @@ -1155,6 +1183,10 @@ public void testMatchFunctionNotAllowedAfterCommands() throws Exception { "1:24: [MATCH] function cannot be used after LIMIT", error("from test | limit 10 | where match(first_name, \"Anna\")") ); + assertEquals( + "1:47: [MATCH] function cannot be used after STATS", + error("from test | STATS c = AVG(salary) BY gender | where match(gender, \"F\")") + ); } public void testMatchFunctionAndOperatorHaveCorrectErrorMessages() throws Exception { @@ -1639,6 +1671,134 @@ public void testToDatePeriodToTimeDurationWithInvalidType() { ); } + public void testIntervalAsString() { + // DateTrunc + for (String interval : List.of("1 minu", "1 dy", "1.5 minutes", "0.5 days", "minutes 1", "day 5")) { + assertThat( + error("from types | EVAL x = date_trunc(\"" + interval + "\", \"1991-06-26T00:00:00.000Z\")"), + containsString("1:35: Cannot convert string [" + interval + "] to [DATE_PERIOD or TIME_DURATION]") + ); + assertThat( + error("from types | EVAL x = \"1991-06-26T00:00:00.000Z\", y = date_trunc(\"" + interval + "\", x::datetime)"), + containsString("1:67: Cannot convert string [" + interval + "] to [DATE_PERIOD or TIME_DURATION]") + ); + } + for (String interval : List.of("1", "0.5", "invalid")) { + assertThat( + error("from types | EVAL x = date_trunc(\"" + interval + "\", \"1991-06-26T00:00:00.000Z\")"), + containsString( + "1:24: first argument of [date_trunc(\"" + + interval + + "\", \"1991-06-26T00:00:00.000Z\")] must be [dateperiod or timeduration], found value [\"" + + interval + + "\"] type [keyword]" + ) + ); + assertThat( + error("from types | EVAL x = \"1991-06-26T00:00:00.000Z\", y = date_trunc(\"" + interval + "\", x::datetime)"), + containsString( + "1:56: first argument of [date_trunc(\"" + + interval + + "\", x::datetime)] " + + "must be [dateperiod or timeduration], found value [\"" + + interval + + "\"] type [keyword]" + ) + ); + } + + // Bucket + assertEquals( + "1:52: Cannot convert string [1 yar] to [DATE_PERIOD or TIME_DURATION], error [Unexpected temporal unit: 'yar']", + error("from test | stats max(emp_no) by bucket(hire_date, \"1 yar\")") + ); + assertEquals( + "1:52: Cannot convert string [1 hur] to [DATE_PERIOD or TIME_DURATION], error [Unexpected temporal unit: 'hur']", + error("from test | stats max(emp_no) by bucket(hire_date, \"1 hur\")") + ); + assertEquals( + "1:58: Cannot convert string [1 mu] to [DATE_PERIOD or TIME_DURATION], error [Unexpected temporal unit: 'mu']", + error("from test | stats max = max(emp_no) by bucket(hire_date, \"1 mu\") | sort max ") + ); + assertEquals( + 
"1:34: second argument of [bucket(hire_date, \"1\")] must be [integral, date_period or time_duration], " + + "found value [\"1\"] type [keyword]", + error("from test | stats max(emp_no) by bucket(hire_date, \"1\")") + ); + assertEquals( + "1:40: second argument of [bucket(hire_date, \"1\")] must be [integral, date_period or time_duration], " + + "found value [\"1\"] type [keyword]", + error("from test | stats max = max(emp_no) by bucket(hire_date, \"1\") | sort max ") + ); + assertEquals( + "1:68: second argument of [bucket(y, \"1\")] must be [integral, date_period or time_duration], " + + "found value [\"1\"] type [keyword]", + error("from test | eval x = emp_no, y = hire_date | stats max = max(x) by bucket(y, \"1\") | sort max ") + ); + } + + public void testCategorizeSingleGrouping() { + query("from test | STATS COUNT(*) BY CATEGORIZE(first_name)"); + query("from test | STATS COUNT(*) BY cat = CATEGORIZE(first_name)"); + + assertEquals( + "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", + error("from test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no") + ); + assertEquals( + "1:39: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", + error("FROM test | STATS COUNT(*) BY emp_no, CATEGORIZE(first_name)") + ); + assertEquals( + "1:35: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", + error("FROM test | STATS COUNT(*) BY a = CATEGORIZE(first_name), b = emp_no") + ); + assertEquals( + "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings\n" + + "line 1:55: cannot use CATEGORIZE grouping function [CATEGORIZE(last_name)] with multiple groupings", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(last_name)") + ); + assertEquals( + "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(first_name)") + ); + } + + public void testCategorizeNestedGrouping() { + query("from test | STATS COUNT(*) BY CATEGORIZE(LENGTH(first_name)::string)"); + + assertEquals( + "1:40: CATEGORIZE grouping function [CATEGORIZE(first_name)] can't be used within other expressions", + error("FROM test | STATS COUNT(*) BY MV_COUNT(CATEGORIZE(first_name))") + ); + assertEquals( + "1:31: CATEGORIZE grouping function [CATEGORIZE(first_name)] can't be used within other expressions", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name)::datetime") + ); + } + + public void testCategorizeWithinAggregations() { + query("from test | STATS MV_COUNT(cat), COUNT(*) BY cat = CATEGORIZE(first_name)"); + + assertEquals( + "1:25: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] within the aggregations", + error("FROM test | STATS COUNT(CATEGORIZE(first_name)) BY CATEGORIZE(first_name)") + ); + + assertEquals( + "1:25: cannot reference CATEGORIZE grouping function [cat] within the aggregations", + error("FROM test | STATS COUNT(cat) BY cat = CATEGORIZE(first_name)") + ); + assertEquals( + "1:30: cannot reference CATEGORIZE grouping function [cat] within the aggregations", + error("FROM test | STATS SUM(LENGTH(cat::keyword) + LENGTH(last_name)) BY cat = CATEGORIZE(first_name)") + ); + assertEquals( + "1:25: cannot reference CATEGORIZE grouping function [`CATEGORIZE(first_name)`] within the aggregations", + error("FROM test | STATS COUNT(`CATEGORIZE(first_name)`) BY CATEGORIZE(first_name)") + ); + } + 
private void query(String query) { defaultAnalyzer.analyze(parser.createStatement(query)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 6a552f400d36e..181b8d52bf888 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -879,8 +879,7 @@ public static void renderDocs() throws IOException { "elseValue", trueValue.type(), "The value that's returned when no condition evaluates to `true`.", - true, - EsqlFunctionRegistry.getTargetType(trueValue.type()) + true ); description = new EsqlFunctionRegistry.FunctionDescription( description.name(), @@ -1085,8 +1084,7 @@ private static void renderDocsForOperators(String name) throws IOException { String[] type = paramInfo == null ? new String[] { "?" } : paramInfo.type(); String desc = paramInfo == null ? "" : paramInfo.description().replace('\n', ' '); boolean optional = paramInfo == null ? false : paramInfo.optional(); - DataType targetDataType = EsqlFunctionRegistry.getTargetType(type); - args.add(new EsqlFunctionRegistry.ArgSignature(paramName, type, desc, optional, targetDataType)); + args.add(new EsqlFunctionRegistry.ArgSignature(paramName, type, desc, optional)); } } renderKibanaFunctionDefinition(name, functionInfo, args, likeOrInOperator(name)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthSerializationTests.java new file mode 100644 index 0000000000000..98b5268797c8c --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthSerializationTests.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractUnaryScalarSerializationTests; + +public class ByteLengthSerializationTests extends AbstractUnaryScalarSerializationTests { + @Override + protected ByteLength create(Source source, Expression child) { + return new ByteLength(source, child); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthTests.java new file mode 100644 index 0000000000000..866b8e0cd8da3 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; + +public class ByteLengthTests extends AbstractScalarFunctionTestCase { + public ByteLengthTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List cases = new ArrayList<>(); + cases.addAll(List.of(new TestCaseSupplier("byte length basic test", List.of(DataType.KEYWORD), () -> { + var s = randomAlphaOfLength(between(0, 10000)); + return testCase(s, DataType.KEYWORD, s.length()); + }))); + cases.addAll(makeTestCases("empty string", () -> "", 0)); + cases.addAll(makeTestCases("single ascii character", () -> "a", 1)); + cases.addAll(makeTestCases("ascii string", () -> "clump", 5)); + cases.addAll(makeTestCases("3 bytes, 1 code point", () -> "☕", 3)); + cases.addAll(makeTestCases("6 bytes, 2 code points", () -> "❗️", 6)); + cases.addAll(makeTestCases("100 random alpha", () -> randomAlphaOfLength(100), 100)); + return parameterSuppliersFromTypedDataWithDefaultChecks(ENTIRELY_NULL_PRESERVES_TYPE, cases, (v, p) -> "string"); + } + + private static List makeTestCases(String title, Supplier text, int expectedByteLength) { + return Stream.of(DataType.KEYWORD, DataType.TEXT, DataType.SEMANTIC_TEXT) + .map( + dataType -> new TestCaseSupplier( + title + " with " + dataType, + List.of(dataType), + () -> testCase(text.get(), dataType, expectedByteLength) + ) + ) + .toList(); + } + + @Override + protected Expression build(Source source, List args) { + assert args.size() == 1; + return new ByteLength(source, args.get(0)); + } + + private static TestCaseSupplier.TestCase testCase(String s, DataType dataType, int expectedByteLength) { + var bytesRef = new BytesRef(s); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(bytesRef, dataType, "f")), + "ByteLengthEvaluator[val=Attribute[channel=0]]", + DataType.INTEGER, + equalTo(expectedByteLength) + ); + } + + private static final boolean ENTIRELY_NULL_PRESERVES_TYPE = true; +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index fdc4935d457e9..c29f111488f96 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.compute.aggregation.QuantileStates; 
+import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.LongVectorBlock; import org.elasticsearch.core.Tuple; import org.elasticsearch.dissect.DissectParser; import org.elasticsearch.index.IndexMode; @@ -148,6 +150,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation.EQ; import static org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation.GT; import static org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation.GTE; @@ -166,6 +169,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; //@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") @@ -564,6 +568,537 @@ public void testStatsWithFilteringDefaultAliasing() { assertThat(Expressions.names(agg.aggregates()), contains("sum(salary)", "sum(salary) WheRe last_name == \"Doe\"")); } + /* + * Limit[1000[INTEGER]] + * \_LocalRelation[[sum(salary) where false{r}#26],[ConstantNullBlock[positions=1]]] + */ + public void testReplaceStatsFilteredAggWithEvalSingleAgg() { + var plan = plan(""" + from test + | stats sum(salary) where false + """); + + var project = as(plan, Limit.class); + var source = as(project.child(), LocalRelation.class); + assertThat(Expressions.names(source.output()), contains("sum(salary) where false")); + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + assertThat(blocks[0].getPositionCount(), is(1)); + assertTrue(blocks[0].areAllValuesNull()); + } + + /* + * Project[[sum(salary) + 1 where false{r}#68]] + * \_Eval[[$$SUM$sum(salary)_+_1$0{r$}#79 + 1[INTEGER] AS sum(salary) + 1 where false]] + * \_Limit[1000[INTEGER]] + * \_LocalRelation[[$$SUM$sum(salary)_+_1$0{r$}#79],[ConstantNullBlock[positions=1]]] + */ + public void testReplaceStatsFilteredAggWithEvalSingleAggWithExpression() { + var plan = plan(""" + from test + | stats sum(salary) + 1 where false + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("sum(salary) + 1 where false")); + + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(1)); + var alias = as(eval.fields().getFirst(), Alias.class); + assertThat(alias.name(), is("sum(salary) + 1 where false")); + var add = as(alias.child(), Add.class); + var literal = as(add.right(), Literal.class); + assertThat(literal.fold(), is(1)); + + var limit = as(eval.child(), Limit.class); + var source = as(limit.child(), LocalRelation.class); + + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + assertThat(blocks[0].getPositionCount(), is(1)); + assertTrue(blocks[0].areAllValuesNull()); + } + + /* + * Project[[sum(salary) + 1 where false{r}#4, sum(salary) + 2{r}#6, emp_no{f}#7]] + * \_Eval[[null[LONG] AS sum(salary) + 1 where false, $$SUM$sum(salary)_+_2$1{r$}#18 + 2[INTEGER] AS sum(salary) + 2]] + * \_Limit[1000[INTEGER]] + * 
\_Aggregate[STANDARD,[emp_no{f}#7],[SUM(salary{f}#12,true[BOOLEAN]) AS $$SUM$sum(salary)_+_2$1, emp_no{f}#7]] + * \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] + */ + public void testReplaceStatsFilteredAggWithEvalMixedFilterAndNoFilter() { + var plan = plan(""" + from test + | stats sum(salary) + 1 where false, + sum(salary) + 2 + by emp_no + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("sum(salary) + 1 where false", "sum(salary) + 2", "emp_no")); + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(2)); + + var alias = as(eval.fields().getFirst(), Alias.class); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(LONG)); + + alias = as(eval.fields().getLast(), Alias.class); + assertThat(Expressions.name(alias.child()), containsString("sum(salary) + 2")); + + var limit = as(eval.child(), Limit.class); + var aggregate = as(limit.child(), Aggregate.class); + var source = as(aggregate.child(), EsRelation.class); + } + + /* + * Project[[sum(salary) + 1 where false{r}#3, sum(salary) + 3{r}#5, sum(salary) + 2 where false{r}#7]] + * \_Eval[[null[LONG] AS sum(salary) + 1 where false, $$SUM$sum(salary)_+_3$1{r$}#19 + 3[INTEGER] AS sum(salary) + 3, nu + * ll[LONG] AS sum(salary) + 2 where false]] + * \_Limit[1000[INTEGER]] + * \_Aggregate[STANDARD,[],[SUM(salary{f}#13,true[BOOLEAN]) AS $$SUM$sum(salary)_+_3$1]] + * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] + */ + public void testReplaceStatsFilteredAggWithEvalFilterFalseAndNull() { + var plan = plan(""" + from test + | stats sum(salary) + 1 where false, + sum(salary) + 3, + sum(salary) + 2 where null + """); + + var project = as(plan, Project.class); + assertThat( + Expressions.names(project.projections()), + contains("sum(salary) + 1 where false", "sum(salary) + 3", "sum(salary) + 2 where null") + ); + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(3)); + + var alias = as(eval.fields().getFirst(), Alias.class); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(LONG)); + + alias = as(eval.fields().get(1), Alias.class); + assertThat(Expressions.name(alias.child()), containsString("sum(salary) + 3")); + + alias = as(eval.fields().getLast(), Alias.class); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(LONG)); + + var limit = as(eval.child(), Limit.class); + var aggregate = as(limit.child(), Aggregate.class); + var source = as(aggregate.child(), EsRelation.class); + } + + /* + * Limit[1000[INTEGER]] + * \_LocalRelation[[count(salary) where false{r}#3],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] + */ + public void testReplaceStatsFilteredAggWithEvalCount() { + var plan = plan(""" + from test + | stats count(salary) where false + """); + + var limit = as(plan, Limit.class); + var source = as(limit.child(), LocalRelation.class); + assertThat(Expressions.names(source.output()), contains("count(salary) where false")); + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + var block = as(blocks[0], LongVectorBlock.class); + assertThat(block.getPositionCount(), is(1)); + assertThat(block.asVector().getLong(0), is(0L)); + } + + /* + * Project[[count_distinct(salary + 2) + 3 where 
false{r}#3]] + * \_Eval[[$$COUNTDISTINCT$count_distinct(>$0{r$}#15 + 3[INTEGER] AS count_distinct(salary + 2) + 3 where false]] + * \_Limit[1000[INTEGER]] + * \_LocalRelation[[$$COUNTDISTINCT$count_distinct(>$0{r$}#15],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] + */ + public void testReplaceStatsFilteredAggWithEvalCountDistinctInExpression() { + var plan = plan(""" + from test + | stats count_distinct(salary + 2) + 3 where false + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("count_distinct(salary + 2) + 3 where false")); + + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(1)); + var alias = as(eval.fields().getFirst(), Alias.class); + assertThat(alias.name(), is("count_distinct(salary + 2) + 3 where false")); + var add = as(alias.child(), Add.class); + var literal = as(add.right(), Literal.class); + assertThat(literal.fold(), is(3)); + + var limit = as(eval.child(), Limit.class); + var source = as(limit.child(), LocalRelation.class); + + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + var block = as(blocks[0], LongVectorBlock.class); + assertThat(block.getPositionCount(), is(1)); + assertThat(block.asVector().getLong(0), is(0L)); + } + + /* + * Project[[max{r}#91, max_a{r}#94, min{r}#97, min_a{r}#100, emp_no{f}#101]] + * \_Eval[[null[INTEGER] AS max_a, null[INTEGER] AS min_a]] + * \_Limit[1000[INTEGER]] + * \_Aggregate[STANDARD,[emp_no{f}#101],[MAX(salary{f}#106,true[BOOLEAN]) AS max, MIN(salary{f}#106,true[BOOLEAN]) AS min, emp_ + * no{f}#101]] + * \_EsRelation[test][_meta_field{f}#107, emp_no{f}#101, first_name{f}#10..] + */ + public void testReplaceStatsFilteredAggWithEvalSameAggWithAndWithoutFilter() { + var plan = plan(""" + from test + | stats max = max(salary), max_a = max(salary) where null, + min = min(salary), min_a = min(salary) where to_string(null) == "abc" + by emp_no + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("max", "max_a", "min", "min_a", "emp_no")); + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(2)); + + var alias = as(eval.fields().getFirst(), Alias.class); + assertThat(Expressions.name(alias), containsString("max_a")); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(INTEGER)); + + alias = as(eval.fields().getLast(), Alias.class); + assertThat(Expressions.name(alias), containsString("min_a")); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(INTEGER)); + + var limit = as(eval.child(), Limit.class); + + var aggregate = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(aggregate.aggregates()), contains("max", "min", "emp_no")); + + var source = as(aggregate.child(), EsRelation.class); + } + + /* + * Limit[1000[INTEGER]] + * \_LocalRelation[[count{r}#7],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] + */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100634") // i.e. 
PropagateEvalFoldables applicability to Aggs + public void testReplaceStatsFilteredAggWithEvalFilterUsingEvaledValue() { + var plan = plan(""" + from test + | eval my_length = length(concat(first_name, null)) + | stats count = count(my_length) where my_length > 0 + """); + + var limit = as(plan, Limit.class); + var source = as(limit.child(), LocalRelation.class); + assertThat(Expressions.names(source.output()), contains("count")); + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + var block = as(blocks[0], LongVectorBlock.class); + assertThat(block.getPositionCount(), is(1)); + assertThat(block.asVector().getLong(0), is(0L)); + } + + /* + * Project[[c{r}#67, emp_no{f}#68]] + * \_Eval[[0[LONG] AS c]] + * \_Limit[1000[INTEGER]] + * \_Aggregate[STANDARD,[emp_no{f}#68],[emp_no{f}#68]] + * \_EsRelation[test][_meta_field{f}#74, emp_no{f}#68, first_name{f}#69, ..] + */ + public void testReplaceStatsFilteredAggWithEvalSingleAggWithGroup() { + var plan = plan(""" + from test + | stats c = count(emp_no) where false + by emp_no + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("c", "emp_no")); + + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(1)); + var alias = as(eval.fields().getFirst(), Alias.class); + assertThat(Expressions.name(alias), containsString("c")); + + var limit = as(eval.child(), Limit.class); + + var aggregate = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(aggregate.aggregates()), contains("emp_no")); + + var source = as(aggregate.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilter() { + var plan = plan(""" + from test + | stats m = min(salary) where emp_no > 1, + max(salary) where emp_no > 1 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + var filter = as(agg.child(), Filter.class); + assertThat(Expressions.name(filter.condition()), is("emp_no > 1")); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterUsingAliases() { + var plan = plan(""" + from test + | eval eno = emp_no + | drop emp_no + | stats min(salary) where eno > 1, + max(salary) where eno > 1 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + var filter = as(agg.child(), Filter.class); + assertThat(Expressions.name(filter.condition()), is("eno > 1")); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterUsingJustOneAlias() { + var plan = plan(""" + from test + | eval eno = emp_no + | stats min(salary) where emp_no > 1, + max(salary) where eno > 1 + """); + + var limit = as(plan, Limit.class); + var agg = 
as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + var filter = as(agg.child(), Filter.class); + var gt = as(filter.condition(), GreaterThan.class); + assertThat(Expressions.name(gt.left()), is("emp_no")); + assertTrue(gt.right().foldable()); + assertThat(gt.right().fold(), is(1)); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterSkippedNotSameFilter() { + var plan = plan(""" + from test + | stats min(salary) where emp_no > 1, + max(salary) where emp_no > 2 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(BinaryComparison.class)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(BinaryComparison.class)); + + var source = as(agg.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterSkippedOnLackingFilter() { + var plan = plan(""" + from test + | stats min(salary), + max(salary) where emp_no > 2 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(BinaryComparison.class)); + + var source = as(agg.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterSkippedWithGroups() { + var plan = plan(""" + from test + | stats min(salary) where emp_no > 2, + max(salary) where emp_no > 2 by first_name + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(3)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(BinaryComparison.class)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(BinaryComparison.class)); + + var source = as(agg.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterNormalizeAndCombineWithExistingFilter() { + var plan = plan(""" + from test + | where emp_no > 3 + | stats min(salary) where emp_no > 2, + max(salary) where 2 < emp_no + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), 
is(Literal.TRUE)); + + var filter = as(agg.child(), Filter.class); + assertThat(Expressions.name(filter.condition()), is("emp_no > 3")); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterInConjunction() { + var plan = plan(""" + from test + | stats min(salary) where emp_no > 2 and first_name == "John", + max(salary) where emp_no > 1 + 1 and length(last_name) < 19 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(Expressions.name(aggFunc.filter()), is("first_name == \"John\"")); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(Expressions.name(aggFunc.filter()), is("length(last_name) < 19")); + + var filter = as(agg.child(), Filter.class); + var gt = as(filter.condition(), GreaterThan.class); // name is "emp_no > 1 + 1" + assertThat(Expressions.name(gt.left()), is("emp_no")); + assertTrue(gt.right().foldable()); + assertThat(gt.right().fold(), is(2)); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterInConjunctionWithMultipleCommonConjunctions() { + var plan = plan(""" + from test + | stats min(salary) where emp_no < 10 and first_name == "John" and last_name == "Doe", + max(salary) where emp_no - 1 < 2 + 7 and length(last_name) < 19 and last_name == "Doe" + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(Expressions.name(aggFunc.filter()), is("first_name == \"John\"")); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(Expressions.name(aggFunc.filter()), is("length(last_name) < 19")); + + var filter = as(agg.child(), Filter.class); + var and = as(filter.condition(), And.class); + + var lt = as(and.left(), LessThan.class); + assertThat(Expressions.name(lt.left()), is("emp_no")); + assertTrue(lt.right().foldable()); + assertThat(lt.right().fold(), is(10)); + + var equals = as(and.right(), Equals.class); + assertThat(Expressions.name(equals.left()), is("last_name")); + assertTrue(equals.right().foldable()); + assertThat(equals.right().fold(), is(BytesRefs.toBytesRef("Doe"))); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterSkippedDueToDisjunction() { + // same query as in testExtractStatsCommonFilterInConjunction, except for the OR in the filter + var plan = plan(""" + from test + | stats min(salary) where emp_no > 2 OR first_name == "John", + max(salary) where emp_no > 1 + 1 and length(last_name) < 19 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(Or.class)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(And.class)); + + var source = as(agg.child(), EsRelation.class); + } + public void 
testQlComparisonOptimizationsApply() { var plan = plan(""" from test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java index 67b4dd71260aa..0177747d27243 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java @@ -431,7 +431,7 @@ public void testDatePeriodLiterals() { } public void testUnknownNumericQualifier() { - assertParsingException(() -> whereExpression("1 decade"), "Unexpected time interval qualifier: 'decade'"); + assertParsingException(() -> whereExpression("1 decade"), "Unexpected temporal unit: 'decade'"); } public void testQualifiedDecimalLiteral() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index 0e09809d16902..5a7547d011c0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; @@ -115,6 +116,7 @@ public static List params() { new Pow(Source.EMPTY, DOUBLE1, DOUBLE2), DOUBLE1, literal, + new ByteLength(Source.EMPTY, literal), new Length(Source.EMPTY, literal), new DateFormat(Source.EMPTY, datePattern, DATE, TEST_CONFIG), new DateFormat(Source.EMPTY, datePattern, literal, TEST_CONFIG), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java index e60024ecd5db4..60b632c443f8e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.index.EsIndex; @@ -228,7 +229,8 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { IndexMode.STANDARD ) ); - IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of()); + + IndexResolution indexResolution = IndexResolution.valid(esIndex, esIndex.concreteIndices(), Map.of()); EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); @@ -266,7 +268,7 @@ public void 
testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { IndexMode.STANDARD ) ); - IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of()); + IndexResolution indexResolution = IndexResolution.valid(esIndex, esIndex.concreteIndices(), Map.of()); EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); @@ -293,7 +295,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); - executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", true)); EsIndex esIndex = new EsIndex( "logs*,remote2:mylogs1,remote2:mylogs2,remote2:logs*", @@ -302,7 +304,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { ); // remote1 is unavailable var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); - IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); + IndexResolution indexResolution = IndexResolution.valid(esIndex, esIndex.concreteIndices(), Map.of(remote1Alias, failure)); EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); @@ -341,8 +343,12 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { ); var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); - IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); - EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + IndexResolution indexResolution = IndexResolution.valid(esIndex, esIndex.concreteIndices(), Map.of(remote1Alias, failure)); + VerificationException ve = expectThrows( + VerificationException.class, + () -> EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution) + ); + assertThat(ve.getDetailedMessage(), containsString("Unknown index [remote2:mylogs1,mylogs2,logs*]")); } } @@ -579,4 +585,46 @@ public void testUpdateExecutionInfoToReturnEmptyResult() { assertThat(remoteFailures.get(0).reason(), containsString("unable to connect to remote cluster")); } } + + public void testMissingIndicesIsFatal() { + String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + String remote1Alias = "remote1"; + String remote2Alias = "remote2"; + String remote3Alias = "remote3"; + + // scenario 1: cluster is skip_unavailable=true - not fatal + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "mylogs1,mylogs2,logs*", true)); + assertThat(EsqlSessionCCSUtils.missingIndicesIsFatal(remote1Alias, executionInfo), equalTo(false)); + } + + // scenario 2: cluster is local cluster and had no concrete indices - not fatal + { + EsqlExecutionInfo executionInfo = 
new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "mylogs1,mylogs2,logs*", true)); + assertThat(EsqlSessionCCSUtils.missingIndicesIsFatal(localClusterAlias, executionInfo), equalTo(false)); + } + + // scenario 3: cluster is local cluster and user specified a concrete index - fatal + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + String localIndexExpr = randomFrom("foo*,logs", "logs", "logs,metrics", "bar*,x*,logs", "logs-1,*x*"); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, localIndexExpr, false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "mylogs1,mylogs2,logs*", true)); + assertThat(EsqlSessionCCSUtils.missingIndicesIsFatal(localClusterAlias, executionInfo), equalTo(true)); + } + + // scenario 4: cluster is skip_unavailable=false - always fatal + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "*", false)); + String indexExpr = randomFrom("foo*,logs", "logs", "bar*,x*,logs", "logs-1,*x*", "*"); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, indexExpr, false)); + assertThat(EsqlSessionCCSUtils.missingIndicesIsFatal(remote1Alias, executionInfo), equalTo(true)); + } + + } } diff --git a/x-pack/plugin/fleet/qa/rest/build.gradle b/x-pack/plugin/fleet/qa/rest/build.gradle index fda9251c7ef34..0959e883997d3 100644 --- a/x-pack/plugin/fleet/qa/rest/build.gradle +++ b/x-pack/plugin/fleet/qa/rest/build.gradle @@ -1,8 +1,13 @@ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ -dependencies { - yamlRestTestImplementation(testArtifact(project(xpackModule('core')))) -} +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-test-artifact' restResources { restApi { @@ -10,11 +15,17 @@ restResources { } } -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.security.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - extraConfigFile 'roles.yml', file('roles.yml') - user username: 'elastic_admin', password: 'admin-password' - user username: 'fleet_unprivileged_secrets', password: 'password', role: 'unprivileged_secrets' +artifacts { + restXpackTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} + +tasks.named('yamlRestTest') { + usesDefaultDistribution() +} +tasks.named('yamlRestCompatTest') { + usesDefaultDistribution() +} +if (buildParams.inFipsJvm){ + // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC + tasks.named("yamlRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java index 202149abf11e1..bc49649bc1139 100644 --- a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java +++ b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java @@ -12,8 +12,12 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class FleetRestIT extends ESClientYamlSuiteTestCase { @@ -21,14 +25,30 @@ public FleetRestIT(final ClientYamlTestCandidate testCandidate) { super(testCandidate); } + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "basic") + .setting("xpack.security.enabled", "true") + .rolesFile(Resource.fromClasspath("roles.yml")) + .user("elastic_admin", "admin-password", "superuser", true) + .user("fleet_unprivileged_secrets", "password", "unprivileged_secrets", true) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected Settings restClientSettings() { - String authentication = basicAuthHeaderValue("elastic_admin", new SecureString("admin-password".toCharArray())); - return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", authentication).build(); + String token = basicAuthHeaderValue("elastic_admin", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } @ParametersFactory public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + } diff --git 
a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml index 5610502a65d23..4c168c8feb0cd 100644 --- a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml +++ b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml @@ -105,6 +105,7 @@ setup: index: "test-after-refresh" allow_partial_search_results: false wait_for_checkpoints: 2 + wait_for_checkpoints_timeout: 1m body: { query: { match_all: {} } } --- @@ -115,7 +116,7 @@ setup: body: - { "allow_partial_search_results": false, wait_for_checkpoints: 1 } - { query: { match_all: { } } } - - { "allow_partial_search_results": false, wait_for_checkpoints: 2 } + - { "allow_partial_search_results": false, wait_for_checkpoints: 2, wait_for_checkpoints_timeout: 1m } - { query: { match_all: { } } } - match: { responses.0._shards.successful: 1 } @@ -128,7 +129,7 @@ setup: - {query: { match_all: {} } } - { "index": "test-alias", "allow_partial_search_results": false, wait_for_checkpoints: 1 } - { query: { match_all: { } } } - - {"index": "test-refresh-disabled", "allow_partial_search_results": false, wait_for_checkpoints: 2} + - { "index": "test-refresh-disabled", "allow_partial_search_results": false, wait_for_checkpoints: 2, wait_for_checkpoints_timeout: 1m } - {query: { match_all: {} } } - match: { responses.0._shards.successful: 1 } diff --git a/x-pack/plugin/fleet/qa/rest/roles.yml b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/roles.yml similarity index 100% rename from x-pack/plugin/fleet/qa/rest/roles.yml rename to x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/roles.yml diff --git a/x-pack/plugin/identity-provider/build.gradle b/x-pack/plugin/identity-provider/build.gradle index dd085e62efa48..f3b0def7eee97 100644 --- a/x-pack/plugin/identity-provider/build.gradle +++ b/x-pack/plugin/identity-provider/build.gradle @@ -281,7 +281,7 @@ tasks.named("thirdPartyAudit").configure { addQaCheckDependencies(project) -if (BuildParams.inFipsJvm) { +if (buildParams.inFipsJvm) { // We don't support the IDP in FIPS-140 mode, so no need to run tests tasks.named("test").configure { enabled = false } } diff --git a/x-pack/plugin/identity-provider/qa/idp-rest-tests/build.gradle b/x-pack/plugin/identity-provider/qa/idp-rest-tests/build.gradle index 46e705ce27244..b109c01181729 100644 --- a/x-pack/plugin/identity-provider/qa/idp-rest-tests/build.gradle +++ b/x-pack/plugin/identity-provider/qa/idp-rest-tests/build.gradle @@ -48,6 +48,6 @@ testClusters.configureEach { // We don't support the IDP in FIPS-140 mode, so no need to run java rest tests tasks.named("javaRestTest").configure { - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle index 111496669afe3..256225c5ef3bf 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle @@ -59,5 +59,5 @@ testClusters.matching{ it.name == 'follow-cluster' }.configureEach { tasks.named("check").configure { dependsOn 'follow-cluster' } // Security is explicitly disabled for follow-cluster and leader-cluster, do not run these in FIPS mode tasks.withType(Test).configureEach { - enabled = BuildParams.inFipsJvm == false + enabled = buildParams.inFipsJvm 
== false } diff --git a/x-pack/plugin/ilm/qa/multi-node/build.gradle b/x-pack/plugin/ilm/qa/multi-node/build.gradle index 8712af84ac245..d420ac9effdde 100644 --- a/x-pack/plugin/ilm/qa/multi-node/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-node/build.gradle @@ -40,7 +40,7 @@ testClusters.configureEach { setting 'time_series.poll_interval', '10m' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/inference/build.gradle b/x-pack/plugin/inference/build.gradle index 15a2d0eb41368..29d5add35ff49 100644 --- a/x-pack/plugin/inference/build.gradle +++ b/x-pack/plugin/inference/build.gradle @@ -205,8 +205,14 @@ tasks.named("thirdPartyAudit").configure { 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess' ) ignoreMissingClasses( @@ -320,10 +326,9 @@ tasks.named("thirdPartyAudit").configure { 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', - 'com.github.luben.zstd.BaseZstdBufferDecompressingStreamNoFinalizer', 'com.github.luben.zstd.Zstd', - 'com.github.luben.zstd.ZstdBufferDecompressingStreamNoFinalizer', - 'com.github.luben.zstd.ZstdDirectBufferDecompressingStreamNoFinalizer', + 'com.github.luben.zstd.ZstdInputStreamNoFinalizer', + 'com.github.luben.zstd.util.Native', 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', 'com.google.protobuf.nano.CodedOutputByteBufferNano', 'com.google.protobuf.nano.MessageNano', diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index f9a1318cd9740..081c83b1e7067 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -135,9 +135,9 @@ public void testApisWithoutTaskType() throws IOException { public void testGetServicesWithoutTaskType() throws IOException { List services = getAllServices(); if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - assertThat(services.size(), equalTo(19)); - } else { assertThat(services.size(), equalTo(18)); + } else { + assertThat(services.size(), equalTo(17)); } String[] providers = new String[services.size()]; @@ -160,7 
+160,6 @@ public void testGetServicesWithoutTaskType() throws IOException { "googleaistudio", "googlevertexai", "hugging_face", - "hugging_face_elser", "mistral", "openai", "streaming_completion_test_service", @@ -259,9 +258,9 @@ public void testGetServicesWithSparseEmbeddingTaskType() throws IOException { List services = getServices(TaskType.SPARSE_EMBEDDING); if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - assertThat(services.size(), equalTo(6)); - } else { assertThat(services.size(), equalTo(5)); + } else { + assertThat(services.size(), equalTo(4)); } String[] providers = new String[services.size()]; @@ -272,9 +271,7 @@ public void testGetServicesWithSparseEmbeddingTaskType() throws IOException { Arrays.sort(providers); - var providerList = new ArrayList<>( - Arrays.asList("alibabacloud-ai-search", "elasticsearch", "hugging_face", "hugging_face_elser", "test_service") - ); + var providerList = new ArrayList<>(Arrays.asList("alibabacloud-ai-search", "elasticsearch", "hugging_face", "test_service")); if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { providerList.add(1, "elastic"); } diff --git a/x-pack/plugin/inference/qa/mixed-cluster/build.gradle b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle index 64edb196397a0..c05e71fa1cd55 100644 --- a/x-pack/plugin/inference/qa/mixed-cluster/build.gradle +++ b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle @@ -20,7 +20,7 @@ def supportedVersion = bwcVersion -> { return bwcVersion.onOrAfter(Version.fromString("8.11.0")) && bwcVersion != VersionProperties.elasticsearchVersion } -BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> def javaRestTest = tasks.register("v${bwcVersion}#javaRestTest", StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/build.gradle b/x-pack/plugin/inference/qa/rolling-upgrade/build.gradle index 5d72fc96d98d8..bfaff7c84d9ad 100644 --- a/x-pack/plugin/inference/qa/rolling-upgrade/build.gradle +++ b/x-pack/plugin/inference/qa/rolling-upgrade/build.gradle @@ -20,7 +20,7 @@ dependencies { } // Inference API added in 8.11 -BuildParams.bwcVersions.withWireCompatible(v -> v.after("8.11.0")) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(v -> v.after("8.11.0")) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java index 6496bcdd89f21..3be85ee857bbb 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; @@ -90,7 +91,7 @@ public Model parsePersistedConfig(String modelId, TaskType taskType, Map serviceSettingsMap); @Override - public void start(Model model, ActionListener listener) { + public void start(Model model, TimeValue timeout, ActionListener listener) { listener.onResponse(true); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java index a6109bfe659d7..002b2b0fe93b0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java @@ -68,7 +68,10 @@ private void getServiceConfigurationsForTaskType( var filteredServices = serviceRegistry.getServices() .entrySet() .stream() - .filter(service -> service.getValue().supportedTaskTypes().contains(requestedTaskType)) + .filter( + service -> service.getValue().hideFromConfigurationApi() == false + && service.getValue().supportedTaskTypes().contains(requestedTaskType) + ) .collect(Collectors.toSet()); getServiceConfigurationsForServices(filteredServices, listener.delegateFailureAndWrap((delegate, configurations) -> { @@ -77,12 +80,14 @@ private void getServiceConfigurationsForTaskType( } private void getAllServiceConfigurations(ActionListener listener) { - getServiceConfigurationsForServices( - serviceRegistry.getServices().entrySet(), - listener.delegateFailureAndWrap((delegate, configurations) -> { - delegate.onResponse(new GetInferenceServicesAction.Response(configurations)); - }) - ); + var availableServices = serviceRegistry.getServices() + .entrySet() + .stream() + .filter(service -> service.getValue().hideFromConfigurationApi() == false) + .collect(Collectors.toSet()); + getServiceConfigurationsForServices(availableServices, listener.delegateFailureAndWrap((delegate, configurations) -> { + delegate.onResponse(new GetInferenceServicesAction.Response(configurations)); + })); } private void getServiceConfigurationsForServices( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java index 64eeed82ee1b9..2baee7f8afd66 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.StrictDynamicMappingException; import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceRegistry; @@ -159,7 +160,7 @@ protected void masterOperation( return; } - parseAndStoreModel(service.get(), request.getInferenceEntityId(), resolvedTaskType, requestAsMap, listener); + parseAndStoreModel(service.get(), request.getInferenceEntityId(), 
resolvedTaskType, requestAsMap, request.ackTimeout(), listener); } private void parseAndStoreModel( @@ -167,12 +168,13 @@ private void parseAndStoreModel( String inferenceEntityId, TaskType taskType, Map config, + TimeValue timeout, ActionListener listener ) { ActionListener storeModelListener = listener.delegateFailureAndWrap( (delegate, verifiedModel) -> modelRegistry.storeModel( verifiedModel, - ActionListener.wrap(r -> startInferenceEndpoint(service, verifiedModel, delegate), e -> { + ActionListener.wrap(r -> startInferenceEndpoint(service, timeout, verifiedModel, delegate), e -> { if (e.getCause() instanceof StrictDynamicMappingException && e.getCause().getMessage().contains("chunking_settings")) { delegate.onFailure( new ElasticsearchStatusException( @@ -199,11 +201,16 @@ private void parseAndStoreModel( service.parseRequestConfig(inferenceEntityId, taskType, config, parsedModelListener); } - private void startInferenceEndpoint(InferenceService service, Model model, ActionListener listener) { + private void startInferenceEndpoint( + InferenceService service, + TimeValue timeout, + Model model, + ActionListener listener + ) { if (skipValidationAndStart) { listener.onResponse(new PutInferenceModelAction.Response(model.getConfigurations())); } else { - service.start(model, listener.map(started -> new PutInferenceModelAction.Response(model.getConfigurations()))); + service.start(model, timeout, listener.map(started -> new PutInferenceModelAction.Response(model.getConfigurations()))); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java index 27b3ae95f1aa4..99f535f81485c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.external.action.googlevertexai; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.SenderExecutableAction; import org.elasticsearch.xpack.inference.external.http.sender.GoogleVertexAiEmbeddingsRequestManager; @@ -33,9 +34,10 @@ public GoogleVertexAiActionCreator(Sender sender, ServiceComponents serviceCompo } @Override - public ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings) { + public ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings, InputType inputType) { + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, taskSettings, inputType); var requestManager = new GoogleVertexAiEmbeddingsRequestManager( - model, + overriddenModel, serviceComponents.truncator(), serviceComponents.threadPool() ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java index def8f09ce06be..2b5cd5854c8ab 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java 
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.external.action.googlevertexai; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; @@ -15,7 +16,7 @@ public interface GoogleVertexAiActionVisitor { - ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings); + ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings, InputType inputType); ExecutableAction create(GoogleVertexAiRerankModel model, Map taskSettings); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java index c0e36baf2e98f..75320bc762c8b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java @@ -40,7 +40,7 @@ public HttpRequest createHttpRequest() { HttpPost httpPost = new HttpPost(model.uri()); ByteArrayEntity byteEntity = new ByteArrayEntity( - Strings.toString(new GoogleVertexAiEmbeddingsRequestEntity(truncationResult.input(), model.getTaskSettings().autoTruncate())) + Strings.toString(new GoogleVertexAiEmbeddingsRequestEntity(truncationResult.input(), model.getTaskSettings())) .getBytes(StandardCharsets.UTF_8) ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java index 2fae999599ba2..fc33df0d63acd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java @@ -7,23 +7,35 @@ package org.elasticsearch.xpack.inference.external.request.googlevertexai; -import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings; import java.io.IOException; import java.util.List; import java.util.Objects; -public record GoogleVertexAiEmbeddingsRequestEntity(List inputs, @Nullable Boolean autoTruncation) implements ToXContentObject { +import static org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsTaskSettings.invalidInputTypeMessage; + +public record GoogleVertexAiEmbeddingsRequestEntity(List inputs, GoogleVertexAiEmbeddingsTaskSettings taskSettings) + implements + ToXContentObject { private static final String INSTANCES_FIELD = 
"instances"; private static final String CONTENT_FIELD = "content"; private static final String PARAMETERS_FIELD = "parameters"; private static final String AUTO_TRUNCATE_FIELD = "autoTruncate"; + private static final String TASK_TYPE_FIELD = "task_type"; + + private static final String CLASSIFICATION_TASK_TYPE = "CLASSIFICATION"; + private static final String CLUSTERING_TASK_TYPE = "CLUSTERING"; + private static final String RETRIEVAL_DOCUMENT_TASK_TYPE = "RETRIEVAL_DOCUMENT"; + private static final String RETRIEVAL_QUERY_TASK_TYPE = "RETRIEVAL_QUERY"; public GoogleVertexAiEmbeddingsRequestEntity { Objects.requireNonNull(inputs); + Objects.requireNonNull(taskSettings); } @Override @@ -35,16 +47,20 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); { builder.field(CONTENT_FIELD, input); + + if (taskSettings.getInputType() != null) { + builder.field(TASK_TYPE_FIELD, convertToString(taskSettings.getInputType())); + } } builder.endObject(); } builder.endArray(); - if (autoTruncation != null) { + if (taskSettings.autoTruncate() != null) { builder.startObject(PARAMETERS_FIELD); { - builder.field(AUTO_TRUNCATE_FIELD, autoTruncation); + builder.field(AUTO_TRUNCATE_FIELD, taskSettings.autoTruncate()); } builder.endObject(); } @@ -52,4 +68,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + + static String convertToString(InputType inputType) { + return switch (inputType) { + case INGEST -> RETRIEVAL_DOCUMENT_TASK_TYPE; + case SEARCH -> RETRIEVAL_QUERY_TASK_TYPE; + case CLASSIFICATION -> CLASSIFICATION_TASK_TYPE; + case CLUSTERING -> CLUSTERING_TASK_TYPE; + default -> { + assert false : invalidInputTypeMessage(inputType); + yield null; + } + }; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index 91b6cdc61afe4..c239319b6283a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -12,9 +12,7 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; @@ -157,17 +155,7 @@ protected RankDoc[] combineInnerRetrieverResults(List rankResults) { } @Override - protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { - var sourceBuilder = new SearchSourceBuilder().pointInTimeBuilder(pit) - .trackTotalHits(false) - .storedFields(new StoredFieldsContext(false)) - .size(rankWindowSize); - // apply the pre-filters downstream once - if (preFilterQueryBuilders.isEmpty() == false) { - retrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); - } - retrieverBuilder.extractToSearchSourceBuilder(sourceBuilder, true); - + protected 
SearchSourceBuilder finalizeSourceBuilder(SearchSourceBuilder sourceBuilder) { sourceBuilder.rankBuilder( new TextSimilarityRankBuilder(this.field, this.inferenceId, this.inferenceText, this.rankWindowSize, this.minScore) ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java index 953cf4cf6ad77..b8a99227cf517 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java @@ -104,13 +104,16 @@ protected abstract void doChunkedInfer( ActionListener> listener ); - @Override public void start(Model model, ActionListener listener) { init(); - doStart(model, listener); } + @Override + public void start(Model model, @Nullable TimeValue unused, ActionListener listener) { + start(model, listener); + } + protected void doStart(Model model, ActionListener listener) { listener.onResponse(true); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java index 5f97f3bad3dc8..922b366498c27 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java @@ -83,7 +83,7 @@ public BaseElasticsearchInternalService( } @Override - public void start(Model model, ActionListener finalListener) { + public void start(Model model, TimeValue timeout, ActionListener finalListener) { if (model instanceof ElasticsearchInternalModel esModel) { if (supportedTaskTypes().contains(model.getTaskType()) == false) { finalListener.onFailure( @@ -107,7 +107,7 @@ public void start(Model model, ActionListener finalListener) { } }) .andThen((l2, modelDidPut) -> { - var startRequest = esModel.getStartTrainedModelDeploymentActionRequest(); + var startRequest = esModel.getStartTrainedModelDeploymentActionRequest(timeout); var responseListener = esModel.getCreateTrainedModelAssignmentActionListener(model, finalListener); client.execute(StartTrainedModelDeploymentAction.INSTANCE, startRequest, responseListener); }) @@ -149,8 +149,7 @@ protected static IllegalStateException notElasticsearchModelException(Model mode ); } - @Override - public void putModel(Model model, ActionListener listener) { + protected void putModel(Model model, ActionListener listener) { if (model instanceof ElasticsearchInternalModel == false) { listener.onFailure(notElasticsearchModelException(model)); return; @@ -303,10 +302,9 @@ protected void maybeStartDeployment( } if (isDefaultId(model.getInferenceEntityId()) && ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { - this.start( - model, - listener.delegateFailureAndWrap((l, started) -> { client.execute(InferModelAction.INSTANCE, request, listener); }) - ); + this.start(model, request.getInferenceTimeout(), listener.delegateFailureAndWrap((l, started) -> { + client.execute(InferModelAction.INSTANCE, request, listener); + })); } else { listener.onFailure(e); } diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java index 996ef6816025d..724c7a8f0a166 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.inference.services.elasticsearch; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; @@ -31,7 +32,7 @@ public boolean usesExistingDeployment() { } @Override - public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest() { + public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest(TimeValue timeout) { throw new IllegalStateException("cannot start model that uses an existing deployment"); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java index 8b2969c39b7ba..2405243f302bc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; @@ -67,11 +68,12 @@ public ElasticsearchInternalModel( this.internalServiceSettings = internalServiceSettings; } - public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest() { + public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest(TimeValue timeout) { var startRequest = new StartTrainedModelDeploymentAction.Request(internalServiceSettings.modelId(), this.getInferenceEntityId()); startRequest.setNumberOfAllocations(internalServiceSettings.getNumAllocations()); startRequest.setThreadsPerAllocation(internalServiceSettings.getNumThreads()); startRequest.setAdaptiveAllocationsSettings(internalServiceSettings.getAdaptiveAllocationsSettings()); + startRequest.setTimeout(timeout); startRequest.setWaitForState(STARTED); return startRequest; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 83249266c79ab..fe83acc8574aa 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -68,6 +68,7 @@ import java.util.Map; 
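Taken together, the changes above thread the caller's timeout from the PUT inference request down to the trained model deployment start request. A minimal sketch of that flow, where putRequest, service, esModel and listener are hypothetical placeholders rather than code from this patch:

// Hypothetical sketch of the timeout plumbing introduced above.
TimeValue timeout = putRequest.ackTimeout();  // captured in TransportPutInferenceModelAction
service.start(esModel, timeout, listener);    // the new start(Model, TimeValue, ActionListener) overload
// ... which BaseElasticsearchInternalService forwards into the model:
var startRequest = esModel.getStartTrainedModelDeploymentActionRequest(timeout);
// ... where ElasticsearchInternalModel now calls startRequest.setTimeout(timeout),
// so the deployment start waits no longer than the caller asked for.
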
import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Stream; @@ -680,25 +681,13 @@ public void chunkedInfer( esModel.getConfigurations().getChunkingSettings() ).batchRequestsWithListeners(listener); - for (var batch : batchedRequests) { - var inferenceRequest = buildInferenceRequest( - esModel.mlNodeDeploymentId(), - EmptyConfigUpdate.INSTANCE, - batch.batch().inputs(), - inputType, - timeout - ); - - ActionListener mlResultsListener = batch.listener() - .delegateFailureAndWrap( - (l, inferenceResult) -> translateToChunkedResult(model.getTaskType(), inferenceResult.getInferenceResults(), l) - ); - - var maybeDeployListener = mlResultsListener.delegateResponse( - (l, exception) -> maybeStartDeployment(esModel, exception, inferenceRequest, mlResultsListener) - ); - - client.execute(InferModelAction.INSTANCE, inferenceRequest, maybeDeployListener); + if (batchedRequests.isEmpty()) { + listener.onResponse(List.of()); + } else { + // Avoid filling the inference queue by executing the batches in series + // Each batch contains up to EMBEDDING_MAX_BATCH_SIZE inference requests + var sequentialRunner = new BatchIterator(esModel, inputType, timeout, batchedRequests); + sequentialRunner.run(); } } else { listener.onFailure(notElasticsearchModelException(model)); @@ -1018,6 +1007,82 @@ static TaskType inferenceConfigToTaskType(InferenceConfig config) { } } + /** + * Iterates over the batches, executing a limited number of requests at a time to avoid + * filling the ML node inference queue. + * + * First, a single request is executed, which can also trigger deploying a model + * if necessary. When this request is successfully executed, a callback executes + * N requests in parallel next. Each of these requests also has a callback that + * executes one more request, so that at all times N requests are in-flight. This + * continues until all requests are executed. + */ + class BatchIterator { + private static final int NUM_REQUESTS_INFLIGHT = 20; // * batch size = 200 + + private final AtomicInteger index = new AtomicInteger(); + private final ElasticsearchInternalModel esModel; + private final List requestAndListeners; + private final InputType inputType; + private final TimeValue timeout; + + BatchIterator( + ElasticsearchInternalModel esModel, + InputType inputType, + TimeValue timeout, + List requestAndListeners + ) { + this.esModel = esModel; + this.requestAndListeners = requestAndListeners; + this.inputType = inputType; + this.timeout = timeout; + } + + void run() { + // The first request may deploy the model, and upon completion runs + // NUM_REQUESTS_INFLIGHT in parallel. + inferenceExecutor.execute(() -> inferBatch(NUM_REQUESTS_INFLIGHT, true)); + } + + private void inferBatch(int runAfterCount, boolean maybeDeploy) { + int batchIndex = index.getAndIncrement(); + if (batchIndex >= requestAndListeners.size()) { + return; + } + executeRequest(batchIndex, maybeDeploy, () -> { + for (int i = 0; i < runAfterCount; i++) { + // Subsequent requests need not deploy the model, because the first request + // already did so. Upon completion, each runs one more request.
+ inferenceExecutor.execute(() -> inferBatch(1, false)); + } + }); + } + + private void executeRequest(int batchIndex, boolean maybeDeploy, Runnable runAfter) { + EmbeddingRequestChunker.BatchRequestAndListener batch = requestAndListeners.get(batchIndex); + var inferenceRequest = buildInferenceRequest( + esModel.mlNodeDeploymentId(), + EmptyConfigUpdate.INSTANCE, + batch.batch().inputs(), + inputType, + timeout + ); + logger.trace("Executing batch index={}", batchIndex); + + ActionListener listener = batch.listener() + .delegateFailureAndWrap( + (l, inferenceResult) -> translateToChunkedResult(esModel.getTaskType(), inferenceResult.getInferenceResults(), l) + ); + if (runAfter != null) { + listener = ActionListener.runAfter(listener, runAfter); + } + if (maybeDeploy) { + listener = listener.delegateResponse((l, exception) -> maybeStartDeployment(esModel, exception, inferenceRequest, l)); + } + client.execute(InferModelAction.INSTANCE, inferenceRequest, listener); + } + } + public static class Configuration { public static InferenceServiceConfiguration get() { return configuration.getOrCompute(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java index 17e6ec2152e7e..caa244f8af4f2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java @@ -7,13 +7,16 @@ package org.elasticsearch.xpack.inference.services.googlevertexai; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; +import java.net.URI; import java.util.Map; import java.util.Objects; @@ -21,6 +24,8 @@ public abstract class GoogleVertexAiModel extends Model { private final GoogleVertexAiRateLimitServiceSettings rateLimitServiceSettings; + protected URI uri; + public GoogleVertexAiModel( ModelConfigurations configurations, ModelSecrets secrets, @@ -34,13 +39,24 @@ public GoogleVertexAiModel( public GoogleVertexAiModel(GoogleVertexAiModel model, ServiceSettings serviceSettings) { super(model, serviceSettings); + uri = model.uri(); + rateLimitServiceSettings = model.rateLimitServiceSettings(); + } + + public GoogleVertexAiModel(GoogleVertexAiModel model, TaskSettings taskSettings) { + super(model, taskSettings); + + uri = model.uri(); rateLimitServiceSettings = model.rateLimitServiceSettings(); } - public abstract ExecutableAction accept(GoogleVertexAiActionVisitor creator, Map taskSettings); + public abstract ExecutableAction accept(GoogleVertexAiActionVisitor creator, Map taskSettings, InputType inputType); public GoogleVertexAiRateLimitServiceSettings rateLimitServiceSettings() { return rateLimitServiceSettings; } + public URI uri() { + return uri; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java index 0b4da10e7130f..a05b1a937d376 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java @@ -210,7 +210,7 @@ protected void doInfer( var actionCreator = new GoogleVertexAiActionCreator(getSender(), getServiceComponents()); - var action = googleVertexAiModel.accept(actionCreator, taskSettings); + var action = googleVertexAiModel.accept(actionCreator, taskSettings, inputType); action.execute(inputs, timeout, listener); } @@ -235,7 +235,7 @@ protected void doChunkedInfer( ).batchRequestsWithListeners(listener); for (var request : batchedRequests) { - var action = googleVertexAiModel.accept(actionCreator, taskSettings); + var action = googleVertexAiModel.accept(actionCreator, taskSettings, inputType); action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, request.listener()); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java index 1df8ee937497a..a5acbb80b76ec 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java @@ -11,12 +11,14 @@ import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; import org.elasticsearch.xpack.inference.external.request.googlevertexai.GoogleVertexAiUtils; @@ -29,13 +31,25 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.stream.Stream; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE; public class GoogleVertexAiEmbeddingsModel extends GoogleVertexAiModel { - private URI uri; + public static GoogleVertexAiEmbeddingsModel of( + GoogleVertexAiEmbeddingsModel model, + Map taskSettings, + InputType inputType + ) { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(taskSettings); + return new GoogleVertexAiEmbeddingsModel( + model, + 
GoogleVertexAiEmbeddingsTaskSettings.of(model.getTaskSettings(), requestTaskSettings, inputType) + ); + } public GoogleVertexAiEmbeddingsModel( String inferenceEntityId, @@ -62,6 +76,10 @@ public GoogleVertexAiEmbeddingsModel(GoogleVertexAiEmbeddingsModel model, Google super(model, serviceSettings); } + public GoogleVertexAiEmbeddingsModel(GoogleVertexAiEmbeddingsModel model, GoogleVertexAiEmbeddingsTaskSettings taskSettings) { + super(model, taskSettings); + } + // Should only be used directly for testing GoogleVertexAiEmbeddingsModel( String inferenceEntityId, @@ -126,13 +144,9 @@ public GoogleVertexAiEmbeddingsRateLimitServiceSettings rateLimitServiceSettings return (GoogleVertexAiEmbeddingsRateLimitServiceSettings) super.rateLimitServiceSettings(); } - public URI uri() { - return uri; - } - @Override - public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings) { - return visitor.create(this, taskSettings); + public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings, InputType inputType) { + return visitor.create(this, taskSettings, inputType); } public static URI buildUri(String location, String projectId, String modelId) throws URISyntaxException { @@ -161,11 +175,32 @@ public static Map get() { new LazyInitializable<>(() -> { var configurationMap = new HashMap(); + configurationMap.put( + INPUT_TYPE, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Input Type") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the type of input passed to the model.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of( + InputType.CLASSIFICATION.toString(), + InputType.CLUSTERING.toString(), + InputType.INGEST.toString(), + InputType.SEARCH.toString() + ).map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()).toList() + ) + .setValue("") + .build() + ); + configurationMap.put( AUTO_TRUNCATE, new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TOGGLE) .setLabel("Auto Truncate") - .setOrder(1) + .setOrder(2) .setRequired(false) .setSensitive(false) .setTooltip("Specifies if the API truncates inputs longer than the maximum token length automatically.") diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java index 14a67a64377e2..e39c423582151 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java @@ -9,29 +9,46 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.ModelConfigurations; import java.util.Map; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalEnum; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE; 
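The of factory above resolves which input type a single request ends up using. A minimal usage sketch, where storedModel and the literal values are illustrative assumptions rather than code from this patch:

// Hypothetical usage of the task settings override flow added above.
// Precedence (see getValidInputType below): a valid request-level InputType
// wins, then the request's task_settings map, then the stored settings.
Map<String, Object> perRequestTaskSettings = Map.of("input_type", "clustering");
var overridden = GoogleVertexAiEmbeddingsModel.of(storedModel, perRequestTaskSettings, InputType.SEARCH);
// Here InputType.SEARCH wins and is serialized into the Vertex AI request
// body as task_type "RETRIEVAL_QUERY" (see convertToString above).
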
+import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.VALID_REQUEST_VALUES; -public record GoogleVertexAiEmbeddingsRequestTaskSettings(@Nullable Boolean autoTruncate) { +public record GoogleVertexAiEmbeddingsRequestTaskSettings(@Nullable Boolean autoTruncate, @Nullable InputType inputType) { - public static final GoogleVertexAiEmbeddingsRequestTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsRequestTaskSettings(null); + public static final GoogleVertexAiEmbeddingsRequestTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsRequestTaskSettings( + null, + null + ); public static GoogleVertexAiEmbeddingsRequestTaskSettings fromMap(Map map) { - if (map.isEmpty()) { - return GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS; + if (map == null || map.isEmpty()) { + return EMPTY_SETTINGS; } ValidationException validationException = new ValidationException(); + InputType inputType = extractOptionalEnum( + map, + INPUT_TYPE, + ModelConfigurations.TASK_SETTINGS, + InputType::fromString, + VALID_REQUEST_VALUES, + validationException + ); + Boolean autoTruncate = extractOptionalBoolean(map, GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate); + return new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate, inputType); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java index dcdbbda33575f..9b759a4661bce 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java @@ -9,19 +9,24 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.EnumSet; import java.util.HashMap; import java.util.Map; import java.util.Objects; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalEnum; public class GoogleVertexAiEmbeddingsTaskSettings implements TaskSettings { @@ -29,48 +34,108 @@ public class GoogleVertexAiEmbeddingsTaskSettings implements TaskSettings { public static final String AUTO_TRUNCATE = "auto_truncate"; - public static final GoogleVertexAiEmbeddingsTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsTaskSettings( - Boolean.valueOf(null) + public static final String INPUT_TYPE = "input_type"; + + static final EnumSet VALID_REQUEST_VALUES = EnumSet.of( + 
InputType.INGEST, + InputType.SEARCH, + InputType.CLASSIFICATION, + InputType.CLUSTERING ); + public static final GoogleVertexAiEmbeddingsTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsTaskSettings(null, null); + public static GoogleVertexAiEmbeddingsTaskSettings fromMap(Map map) { + if (map == null || map.isEmpty()) { + return EMPTY_SETTINGS; + } + ValidationException validationException = new ValidationException(); + InputType inputType = extractOptionalEnum( + map, + INPUT_TYPE, + ModelConfigurations.TASK_SETTINGS, + InputType::fromString, + VALID_REQUEST_VALUES, + validationException + ); + Boolean autoTruncate = extractOptionalBoolean(map, AUTO_TRUNCATE, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType); } public static GoogleVertexAiEmbeddingsTaskSettings of( GoogleVertexAiEmbeddingsTaskSettings originalSettings, - GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings + GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings, + InputType requestInputType ) { + var inputTypeToUse = getValidInputType(originalSettings, requestSettings, requestInputType); var autoTruncate = requestSettings.autoTruncate() == null ? originalSettings.autoTruncate : requestSettings.autoTruncate(); - return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputTypeToUse); + } + + private static InputType getValidInputType( + GoogleVertexAiEmbeddingsTaskSettings originalSettings, + GoogleVertexAiEmbeddingsRequestTaskSettings requestTaskSettings, + InputType requestInputType + ) { + InputType inputTypeToUse = originalSettings.inputType; + + if (VALID_REQUEST_VALUES.contains(requestInputType)) { + inputTypeToUse = requestInputType; + } else if (requestTaskSettings.inputType() != null) { + inputTypeToUse = requestTaskSettings.inputType(); + } + + return inputTypeToUse; } + private final InputType inputType; private final Boolean autoTruncate; - public GoogleVertexAiEmbeddingsTaskSettings(@Nullable Boolean autoTruncate) { + public GoogleVertexAiEmbeddingsTaskSettings(@Nullable Boolean autoTruncate, @Nullable InputType inputType) { + validateInputType(inputType); + this.inputType = inputType; this.autoTruncate = autoTruncate; } public GoogleVertexAiEmbeddingsTaskSettings(StreamInput in) throws IOException { this.autoTruncate = in.readOptionalBoolean(); + + var inputType = (in.getTransportVersion().onOrAfter(TransportVersions.VERTEX_AI_INPUT_TYPE_ADDED)) + ? 
in.readOptionalEnum(InputType.class) + : null; + + validateInputType(inputType); + this.inputType = inputType; + } + + private static void validateInputType(InputType inputType) { + if (inputType == null) { + return; + } + + assert VALID_REQUEST_VALUES.contains(inputType) : invalidInputTypeMessage(inputType); } @Override public boolean isEmpty() { - return autoTruncate == null; + return inputType == null && autoTruncate == null; } public Boolean autoTruncate() { return autoTruncate; } + public InputType getInputType() { + return inputType; + } + @Override public String getWriteableName() { return NAME; @@ -84,11 +149,19 @@ public TransportVersion getMinimalSupportedVersion() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(this.autoTruncate); + + if (out.getTransportVersion().onOrAfter(TransportVersions.VERTEX_AI_INPUT_TYPE_ADDED)) { + out.writeOptionalEnum(this.inputType); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + if (inputType != null) { + builder.field(INPUT_TYPE, inputType); + } + if (autoTruncate != null) { builder.field(AUTO_TRUNCATE, autoTruncate); } @@ -101,19 +174,23 @@ public boolean equals(Object object) { if (this == object) return true; if (object == null || getClass() != object.getClass()) return false; GoogleVertexAiEmbeddingsTaskSettings that = (GoogleVertexAiEmbeddingsTaskSettings) object; - return Objects.equals(autoTruncate, that.autoTruncate); + return Objects.equals(inputType, that.inputType) && Objects.equals(autoTruncate, that.autoTruncate); } @Override public int hashCode() { - return Objects.hash(autoTruncate); + return Objects.hash(autoTruncate, inputType); + } + + public static String invalidInputTypeMessage(InputType inputType) { + return Strings.format("received invalid input type value [%s]", inputType.toString()); } @Override public TaskSettings updatedTaskSettings(Map newSettings) { - GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + GoogleVertexAiEmbeddingsRequestTaskSettings updatedSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( new HashMap<>(newSettings) ); - return of(this, requestSettings); + return of(this, updatedSettings, updatedSettings.inputType() != null ? 
updatedSettings.inputType() : this.inputType); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java index 3f9c4f7a66560..e73d8d2e2613a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java @@ -10,6 +10,7 @@ import org.apache.http.client.utils.URIBuilder; import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SettingsConfiguration; @@ -34,8 +35,6 @@ public class GoogleVertexAiRerankModel extends GoogleVertexAiModel { - private URI uri; - public GoogleVertexAiRerankModel( String inferenceEntityId, TaskType taskType, @@ -122,12 +121,8 @@ public GoogleDiscoveryEngineRateLimitServiceSettings rateLimitServiceSettings() return (GoogleDiscoveryEngineRateLimitServiceSettings) super.rateLimitServiceSettings(); } - public URI uri() { - return uri; - } - @Override - public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings) { + public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings, InputType inputType) { return visitor.create(this, taskSettings); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index e0afbf924f654..a2e22e24172cf 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -125,6 +125,11 @@ public InferenceServiceConfiguration getConfiguration() { return Configuration.get(); } + @Override + public Boolean hideFromConfigurationApi() { + return Boolean.TRUE; + } + @Override public EnumSet supportedTaskTypes() { return supportedTaskTypes; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java index c1be537a6b0a7..4fdf254101d3e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java @@ -24,12 +24,25 @@ import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.startsWith; public class EmbeddingRequestChunkerTests extends ESTestCase { + public void testEmptyInput() { + var embeddingType = 
randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of(), 100, 100, 10, embeddingType).batchRequestsWithListeners(testListener()); + assertThat(batches, empty()); + } + + public void testBlankInput() { + var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of(""), 100, 100, 10, embeddingType).batchRequestsWithListeners(testListener()); + assertThat(batches, hasSize(1)); + } + public void testShortInputsAreSingleBatch() { String input = "one chunk"; var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java index f4912e0862e60..18ae7425aaaf2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java @@ -8,10 +8,12 @@ package org.elasticsearch.xpack.inference.external.request.googlevertexai; import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings; import java.io.IOException; import java.util.List; @@ -20,8 +22,11 @@ public class GoogleVertexAiEmbeddingsRequestEntityTests extends ESTestCase { - public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), true); + public void testToXContent_SingleEmbeddingRequest_WritesAllFields() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc"), + new GoogleVertexAiEmbeddingsTaskSettings(true, InputType.SEARCH) + ); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -31,7 +36,8 @@ public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined( { "instances": [ { - "content": "abc" + "content": "abc", + "task_type": "RETRIEVAL_QUERY" } ], "parameters": { @@ -42,7 +48,10 @@ public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined( } public void testToXContent_SingleEmbeddingRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), null); + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc"), + new GoogleVertexAiEmbeddingsTaskSettings(null, InputType.INGEST) + ); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -52,15 +61,16 @@ public void testToXContent_SingleEmbeddingRequest_DoesNotWriteAutoTruncationIfNo { "instances": [ { - "content": "abc" + "content": "abc", + "task_type": "RETRIEVAL_DOCUMENT" } ] } """)); } - public void 
testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), true); + public void testToXContent_SingleEmbeddingRequest_DoesNotWriteInputTypeIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), new GoogleVertexAiEmbeddingsTaskSettings(false, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -71,9 +81,35 @@ public void testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefin "instances": [ { "content": "abc" + } + ], + "parameters": { + "autoTruncate": false + } + } + """)); + } + + public void testToXContent_MultipleEmbeddingsRequest_WritesAllFields() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc", "def"), + new GoogleVertexAiEmbeddingsTaskSettings(true, InputType.CLUSTERING) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc", + "task_type": "CLUSTERING" }, { - "content": "def" + "content": "def", + "task_type": "CLUSTERING" } ], "parameters": { @@ -83,8 +119,8 @@ public void testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefin """)); } - public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), null); + public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteInputTypeIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), new GoogleVertexAiEmbeddingsTaskSettings(true, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -99,8 +135,48 @@ public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationI { "content": "def" } + ], + "parameters": { + "autoTruncate": true + } + } + """)); + } + + public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc", "def"), + new GoogleVertexAiEmbeddingsTaskSettings(null, InputType.CLASSIFICATION) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc", + "task_type": "CLASSIFICATION" + }, + { + "content": "def", + "task_type": "CLASSIFICATION" + } ] } """)); } + + public void testToXContent_ThrowsIfInputIsNull() { + expectThrows( + NullPointerException.class, + () -> new GoogleVertexAiEmbeddingsRequestEntity(null, new GoogleVertexAiEmbeddingsTaskSettings(null, InputType.CLASSIFICATION)) + ); + } + + public void testToXContent_ThrowsIfTaskSettingsIsNull() { + expectThrows(NullPointerException.class, () -> new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), null)); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java index b28fd8d3a0cf9..a26d3496bed6b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java @@ -10,6 +10,7 @@ import org.apache.http.HttpHeaders; import org.apache.http.client.methods.HttpPost; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.common.Truncator; @@ -31,11 +32,11 @@ public class GoogleVertexAiEmbeddingsRequestTests extends ESTestCase { private static final String AUTH_HEADER_VALUE = "foo"; - public void testCreateRequest_WithoutDimensionsSet_And_WithoutAutoTruncateSet() throws IOException { + public void testCreateRequest_WithoutDimensionsSet_And_WithoutAutoTruncateSet_And_WithoutInputTypeSet() throws IOException { var model = "model"; var input = "input"; - var request = createRequest(model, input, null); + var request = createRequest(model, input, null, null); var httpRequest = request.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); @@ -54,7 +55,7 @@ public void testCreateRequest_WithAutoTruncateSet() throws IOException { var input = "input"; var autoTruncate = true; - var request = createRequest(model, input, autoTruncate); + var request = createRequest(model, input, autoTruncate, null); var httpRequest = request.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); @@ -68,11 +69,29 @@ public void testCreateRequest_WithAutoTruncateSet() throws IOException { assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "input")), "parameters", Map.of("autoTruncate", true)))); } + public void testCreateRequest_WithInputTypeSet() throws IOException { + var model = "model"; + var input = "input"; + + var request = createRequest(model, input, null, InputType.SEARCH); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(1)); + assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "input", "task_type", "RETRIEVAL_QUERY"))))); + } + public void testTruncate_ReducesInputTextSizeByHalf() throws IOException { var model = "model"; var input = "abcd"; - var request = createRequest(model, input, null); + var request = createRequest(model, input, null, null); var truncatedRequest = request.truncate(); var httpRequest = truncatedRequest.createHttpRequest(); @@ -87,8 +106,13 @@ public void testTruncate_ReducesInputTextSizeByHalf() throws IOException { assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "ab"))))); } - private static GoogleVertexAiEmbeddingsRequest createRequest(String modelId, String input, @Nullable Boolean autoTruncate) { - var embeddingsModel = 
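The truncation test above relies on the shared `Truncator` semantics: a truncation pass halves the input, so for this single-token input "abcd" becomes "ab". As a self-contained check of that halving (real truncation also tracks whether any cutting occurred):

```java
// Halving semantics testTruncate_ReducesInputTextSizeByHalf relies on.
final class TruncateSketch {
    static String halve(String input) {
        return input.substring(0, input.length() / 2);
    }

    public static void main(String[] args) {
        System.out.println(halve("abcd")); // prints "ab"
    }
}
```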
GoogleVertexAiEmbeddingsModelTests.createModel(modelId, autoTruncate); + private static GoogleVertexAiEmbeddingsRequest createRequest( + String modelId, + String input, + @Nullable Boolean autoTruncate, + @Nullable InputType inputType + ) { + var embeddingsModel = GoogleVertexAiEmbeddingsModelTests.createModel(modelId, autoTruncate, inputType); return new GoogleVertexAiEmbeddingsWithoutAuthRequest( TruncatorTests.createTruncator(), diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 89a27a921cbea..9a4d0dda82238 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; @@ -65,6 +66,7 @@ import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsTests; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.WordBoundaryChunkingSettings; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.junit.After; import org.junit.Before; @@ -72,12 +74,14 @@ import org.mockito.Mockito; import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -832,16 +836,16 @@ public void testParsePersistedConfig() { } } - public void testChunkInfer_E5WithNullChunkingSettings() { + public void testChunkInfer_E5WithNullChunkingSettings() throws InterruptedException { testChunkInfer_e5(null); } - public void testChunkInfer_E5ChunkingSettingsSet() { + public void testChunkInfer_E5ChunkingSettingsSet() throws InterruptedException { testChunkInfer_e5(ChunkingSettingsTests.createRandomChunkingSettings()); } @SuppressWarnings("unchecked") - private void testChunkInfer_e5(ChunkingSettings chunkingSettings) { + private void testChunkInfer_e5(ChunkingSettings chunkingSettings) throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); @@ -889,6 +893,9 @@ private void testChunkInfer_e5(ChunkingSettings chunkingSettings) { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -897,22 +904,23 @@ private void testChunkInfer_e5(ChunkingSettings chunkingSettings) { InputType.SEARCH, new 
ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } - public void testChunkInfer_SparseWithNullChunkingSettings() { + public void testChunkInfer_SparseWithNullChunkingSettings() throws InterruptedException { testChunkInfer_Sparse(null); } - public void testChunkInfer_SparseWithChunkingSettingsSet() { + public void testChunkInfer_SparseWithChunkingSettingsSet() throws InterruptedException { testChunkInfer_Sparse(ChunkingSettingsTests.createRandomChunkingSettings()); } @SuppressWarnings("unchecked") - private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { + private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); @@ -936,6 +944,7 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { var service = createService(client); var gotResults = new AtomicBoolean(); + var resultsListener = ActionListener.>wrap(chunkedResponse -> { assertThat(chunkedResponse, hasSize(2)); assertThat(chunkedResponse.get(0), instanceOf(InferenceChunkedSparseEmbeddingResults.class)); @@ -955,6 +964,9 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -963,22 +975,23 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { InputType.SEARCH, new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } - public void testChunkInfer_ElserWithNullChunkingSettings() { + public void testChunkInfer_ElserWithNullChunkingSettings() throws InterruptedException { testChunkInfer_Elser(null); } - public void testChunkInfer_ElserWithChunkingSettingsSet() { + public void testChunkInfer_ElserWithChunkingSettingsSet() throws InterruptedException { testChunkInfer_Elser(ChunkingSettingsTests.createRandomChunkingSettings()); } @SuppressWarnings("unchecked") - private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) { + private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); @@ -1022,6 +1035,9 @@ private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -1030,9 +1046,10 @@ private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) { InputType.SEARCH, new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } @@ -1093,7 +1110,7 @@ public void 
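The mechanical change running through these chunked-inference tests: instead of terminating the thread pool from inside the listener via `ActionListener.runAfter`, each test now wraps its result listener in a `LatchedActionListener` and blocks on the latch, so assertions only run after the async callback has completed. The pattern in isolation, assuming the Elasticsearch action classes on the classpath:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;

// Wait-for-async-callback pattern the testChunkInfer_* methods switch to.
final class LatchPatternSketch {
    static void runAndAwait(Consumer<ActionListener<Object>> asyncCall) throws InterruptedException {
        var gotResults = new AtomicBoolean();
        var listener = ActionListener.wrap(response -> gotResults.set(true), e -> { throw new AssertionError(e); });
        var latch = new CountDownLatch(1);
        asyncCall.accept(new LatchedActionListener<>(listener, latch));
        latch.await(); // block until onResponse or onFailure has run
        assert gotResults.get() : "Listener not called";
    }
}
```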
testChunkInferSetsTokenization() { } @SuppressWarnings("unchecked") - public void testChunkInfer_FailsBatch() { + public void testChunkInfer_FailsBatch() throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); @@ -1129,6 +1146,9 @@ public void testChunkInfer_FailsBatch() { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -1137,12 +1157,86 @@ public void testChunkInfer_FailsBatch() { InputType.SEARCH, new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } + @SuppressWarnings("unchecked") + public void testChunkingLargeDocument() throws InterruptedException { + int numBatches = randomIntBetween(3, 6); + + // how many response objects to return in each batch + int[] numResponsesPerBatch = new int[numBatches]; + for (int i = 0; i < numBatches - 1; i++) { + numResponsesPerBatch[i] = ElasticsearchInternalService.EMBEDDING_MAX_BATCH_SIZE; + } + numResponsesPerBatch[numBatches - 1] = randomIntBetween(1, ElasticsearchInternalService.EMBEDDING_MAX_BATCH_SIZE); + int numChunks = Arrays.stream(numResponsesPerBatch).sum(); + + // build a doc with enough words to make numChunks of chunks + int wordsPerChunk = 10; + int numWords = numChunks * wordsPerChunk; + var input = "word ".repeat(numWords); + + Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + + // mock the inference response + doAnswer(invocationOnMock -> { + var request = (InferModelAction.Request) invocationOnMock.getArguments()[1]; + var listener = (ActionListener) invocationOnMock.getArguments()[2]; + var mlTrainedModelResults = new ArrayList(); + for (int i = 0; i < request.numberOfDocuments(); i++) { + mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); + } + var response = new InferModelAction.Response(mlTrainedModelResults, "foo", true); + listener.onResponse(response); + return null; + }).when(client).execute(same(InferModelAction.INSTANCE), any(InferModelAction.Request.class), any(ActionListener.class)); + + var service = createService(client); + + var gotResults = new AtomicBoolean(); + var resultsListener = ActionListener.>wrap(chunkedResponse -> { + assertThat(chunkedResponse, hasSize(1)); + assertThat(chunkedResponse.get(0), instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var sparseResults = (InferenceChunkedTextEmbeddingFloatResults) chunkedResponse.get(0); + assertThat(sparseResults.chunks(), hasSize(numChunks)); + + gotResults.set(true); + }, ESTestCase::fail); + + // Create model using the word boundary chunker. 
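The counting in testChunkingLargeDocument works as follows: every batch except the last carries exactly `EMBEDDING_MAX_BATCH_SIZE` embeddings, the input is built from `numChunks * wordsPerChunk` repeated words, and a word-boundary chunker with no overlap then yields exactly `numChunks` chunks. A worked version of that arithmetic; the batch size value below is a stand-in for the real constant, not its actual value:

```java
// Chunk/batch arithmetic behind testChunkingLargeDocument.
final class ChunkBatchMath {
    public static void main(String[] args) {
        int maxBatchSize = 10;  // stand-in for ElasticsearchInternalService.EMBEDDING_MAX_BATCH_SIZE
        int numBatches = 4;     // the test randomizes this between 3 and 6
        int lastBatchSize = 7;  // the test randomizes this between 1 and maxBatchSize
        int numChunks = (numBatches - 1) * maxBatchSize + lastBatchSize;
        int wordsPerChunk = 10;
        String input = "word ".repeat(numChunks * wordsPerChunk);
        System.out.println(input.split(" ").length / wordsPerChunk); // == numChunks
        System.out.println(numChunks + " chunks across " + numBatches + " inference requests");
    }
}
```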
+ var model = new MultilingualE5SmallModel( + "foo", + TaskType.TEXT_EMBEDDING, + "e5", + new MultilingualE5SmallInternalServiceSettings(1, 1, "cross-platform", null), + new WordBoundaryChunkingSettings(wordsPerChunk, 0) + ); + + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + + // For the given input we know how many requests will be made + service.chunkedInfer( + model, + null, + List.of(input), + Map.of(), + InputType.SEARCH, + new ChunkingOptions(null, null), + InferenceAction.Request.DEFAULT_TIMEOUT, + latchedListener + ); + + latch.await(); + assertTrue("Listener not called with results", gotResults.get()); + } + public void testParsePersistedConfig_Rerank() { // with task settings { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java index 6f28301078853..906a825e49561 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java @@ -13,8 +13,10 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskType; @@ -109,7 +111,7 @@ public void testParseRequestConfig_CreatesGoogleVertexAiEmbeddingsModel() throws projectId ) ), - new HashMap<>(Map.of()), + getTaskSettingsMap(true, InputType.INGEST), getSecretSettingsMap(serviceAccountJson) ), modelListener @@ -154,7 +156,7 @@ public void testParseRequestConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenChun projectId ) ), - new HashMap<>(Map.of()), + getTaskSettingsMap(true, InputType.INGEST), createRandomChunkingSettingsMap(), getSecretSettingsMap(serviceAccountJson) ), @@ -200,7 +202,7 @@ public void testParseRequestConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenChun projectId ) ), - new HashMap<>(Map.of()), + getTaskSettingsMap(false, InputType.SEARCH), getSecretSettingsMap(serviceAccountJson) ), modelListener @@ -281,7 +283,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws I "project" ) ), - getTaskSettingsMap(true), + getTaskSettingsMap(true, InputType.SEARCH), getSecretSettingsMap("{}") ); config.put("extra_key", "value"); @@ -308,7 +310,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMa ); serviceSettings.put("extra_key", "value"); - var config = getRequestConfigMap(serviceSettings, getTaskSettingsMap(true), getSecretSettingsMap("{}")); + var config = getRequestConfigMap(serviceSettings, getTaskSettingsMap(true, InputType.CLUSTERING), getSecretSettingsMap("{}")); var failureListener = getModelListenerForException( ElasticsearchStatusException.class, @@ -362,7 +364,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap "project" ) ), - getTaskSettingsMap(true), + getTaskSettingsMap(true, null), 
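For reference, the task settings maps these service tests construct correspond to this request-level JSON shape; the keys match the `toXContent` assertions later in this diff, and the values vary per test:

```java
// JSON shape of the task settings exercised by the GoogleVertexAiServiceTests cases.
final class TaskSettingsJsonExample {
    static final String EXAMPLE = """
        {
          "input_type": "ingest",
          "auto_truncate": true
        }
        """;
}
```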
secretSettings ); @@ -399,7 +401,7 @@ public void testParsePersistedConfigWithSecrets_CreatesGoogleVertexAiEmbeddingsM true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, InputType.SEARCH), getSecretSettingsMap(serviceAccountJson) ); @@ -417,7 +419,7 @@ public void testParsePersistedConfigWithSecrets_CreatesGoogleVertexAiEmbeddingsM assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.SEARCH))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -447,7 +449,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAGoogleVertexAiEmbeddings true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), createRandomChunkingSettingsMap(), getSecretSettingsMap(serviceAccountJson) ); @@ -466,7 +468,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAGoogleVertexAiEmbeddings assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } @@ -497,7 +499,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModelWhenChun true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), getSecretSettingsMap(serviceAccountJson) ); @@ -515,7 +517,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModelWhenChun assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } @@ -573,7 +575,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, InputType.INGEST), getSecretSettingsMap(serviceAccountJson) ); persistedConfig.config().put("extra_key", "value"); @@ -592,7 +594,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), 
is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.INGEST))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -625,7 +627,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), secretSettingsMap ); @@ -643,7 +645,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -676,7 +678,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists var persistedConfig = getPersistedConfigMap( serviceSettingsMap, - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, InputType.CLUSTERING), getSecretSettingsMap(serviceAccountJson) ); @@ -694,7 +696,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.CLUSTERING))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -711,7 +713,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists """; try (var service = createGoogleVertexAiService()) { - var taskSettings = getTaskSettingsMap(autoTruncate); + var taskSettings = getTaskSettingsMap(autoTruncate, InputType.SEARCH); taskSettings.put("extra_key", "value"); var persistedConfig = getPersistedConfigMap( @@ -745,7 +747,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.SEARCH))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -770,7 +772,7 @@ public void testParsePersistedConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenCh true ) ), - getTaskSettingsMap(autoTruncate), + 
getTaskSettingsMap(autoTruncate, null), createRandomChunkingSettingsMap() ); @@ -783,7 +785,7 @@ public void testParsePersistedConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenCh assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); } } @@ -808,7 +810,7 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSetting true ) ), - getTaskSettingsMap(autoTruncate) + getTaskSettingsMap(autoTruncate, null) ); var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); @@ -820,7 +822,7 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSetting assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); } } @@ -838,12 +840,44 @@ public void testGetConfiguration() throws Exception { { "task_type": "text_embedding", "configuration": { + "input_type": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Input Type", + "options": [ + { + "label": "classification", + "value": "classification" + }, + { + "label": "clustering", + "value": "clustering" + }, + { + "label": "ingest", + "value": "ingest" + }, + { + "label": "search", + "value": "search" + } + ], + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the type of input passed to the model.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + }, "auto_truncate": { "default_value": null, "depends_on": [], "display": "toggle", "label": "Auto Truncate", - "order": 1, + "order": 2, "required": false, "sensitive": false, "tooltip": "Specifies if the API truncates inputs longer than the maximum token length automatically.", @@ -1005,11 +1039,15 @@ private static ActionListener getModelListenerForException(Class excep }); } - private static Map getTaskSettingsMap(Boolean autoTruncate) { + private static Map getTaskSettingsMap(Boolean autoTruncate, @Nullable InputType inputType) { var taskSettings = new HashMap(); taskSettings.put(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, autoTruncate); + if (inputType != null) { + taskSettings.put(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, inputType.toString()); + } + return taskSettings; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java index 
68d03d350d06e..7836c5c15cfb1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java @@ -10,14 +10,18 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; +import org.hamcrest.MatcherAssert; import java.net.URI; import java.net.URISyntaxException; +import java.util.Map; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettingsTests.getTaskSettingsMap; import static org.hamcrest.Matchers.is; public class GoogleVertexAiEmbeddingsModelTests extends ESTestCase { @@ -45,6 +49,75 @@ public void testBuildUri() throws URISyntaxException { ); } + public void testOverrideWith_DoesNotOverrideAndModelRemainsEqual_WhenSettingsAreEmpty_AndInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.SEARCH); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, Map.of(), InputType.UNSPECIFIED); + + MatcherAssert.assertThat(overriddenModel, is(model)); + } + + public void testOverrideWith_DoesNotOverrideAndModelRemainsEqual_WhenSettingsAreNull_AndInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.SEARCH); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, null, InputType.UNSPECIFIED); + + MatcherAssert.assertThat(overriddenModel, is(model)); + } + + public void testOverrideWith_SetsInputTypeToOverride_WhenFieldIsNullInModelTaskSettings_AndNullInRequestTaskSettings() { + var model = createModel("model", Boolean.FALSE, null); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.SEARCH); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_SetsInputType_FromRequest_IfValid_OverridingStoredTaskSettings() { + var model = createModel("model", Boolean.FALSE, InputType.INGEST); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.SEARCH); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_SetsInputType_FromRequest_IfValid_OverridingRequestTaskSettings() { + var model = createModel("model", Boolean.FALSE, null); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, InputType.CLUSTERING), InputType.SEARCH); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_OverridesInputType_WithRequestTaskSettingsSearch_WhenRequestInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.INGEST); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, InputType.SEARCH), InputType.UNSPECIFIED); + + var 
expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_DoesNotSetInputType_FromRequest_IfInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, null); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.UNSPECIFIED); + + var expectedModel = createModel("model", Boolean.FALSE, null); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_DoesNotSetInputType_WhenRequestTaskSettingsIsNull_AndRequestInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.INGEST); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.UNSPECIFIED); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.INGEST); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_DoesNotOverrideModelUri() { + var model = createModel("model", Boolean.FALSE, InputType.SEARCH); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, Map.of(), null); + + MatcherAssert.assertThat(overriddenModel.uri(), is(model.uri())); + } + public static GoogleVertexAiEmbeddingsModel createModel( String location, String projectId, @@ -58,12 +131,37 @@ public static GoogleVertexAiEmbeddingsModel createModel( "service", uri, new GoogleVertexAiEmbeddingsServiceSettings(location, projectId, modelId, false, null, null, null, null), - new GoogleVertexAiEmbeddingsTaskSettings(Boolean.FALSE), + new GoogleVertexAiEmbeddingsTaskSettings(Boolean.FALSE, null), new GoogleVertexAiSecretSettings(new SecureString(serviceAccountJson.toCharArray())) ); } - public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullable Boolean autoTruncate) { + public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullable Boolean autoTruncate, @Nullable InputType inputType) { + return new GoogleVertexAiEmbeddingsModel( + "id", + TaskType.TEXT_EMBEDDING, + "service", + new GoogleVertexAiEmbeddingsServiceSettings( + "location", + "projectId", + modelId, + false, + null, + null, + SimilarityMeasure.DOT_PRODUCT, + null + ), + new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType), + null, + new GoogleVertexAiSecretSettings(new SecureString("testString".toCharArray())) + ); + } + + public static GoogleVertexAiEmbeddingsModel createRandomizedModel( + String modelId, + @Nullable Boolean autoTruncate, + @Nullable InputType inputType + ) { return new GoogleVertexAiEmbeddingsModel( "id", TaskType.TEXT_EMBEDDING, @@ -78,7 +176,7 @@ public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullabl SimilarityMeasure.DOT_PRODUCT, null ), - new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate), + new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType), null, new GoogleVertexAiSecretSettings(new SecureString(randomAlphaOfLength(8).toCharArray())) ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java index 1e9a2f435cb08..a49e0f2e3f57d 100644 --- 
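Taken together, the testOverrideWith_* cases fix a precedence order for the effective input type: a valid request-level `InputType` wins; `UNSPECIFIED` is not a valid override and defers to the request task settings; only then does the stored model setting apply. A sketch mirroring what the tests assert, not the literal implementation:

```java
import org.elasticsearch.core.Nullable;
import org.elasticsearch.inference.InputType;

// Precedence implied by the testOverrideWith_* cases.
final class InputTypePrecedenceSketch {
    @Nullable
    static InputType resolve(
        @Nullable InputType storedSetting,       // from the persisted task settings
        @Nullable InputType requestTaskSetting,  // from task_settings in the request body
        @Nullable InputType requestLevel         // from the inference request itself
    ) {
        if (requestLevel != null && requestLevel != InputType.UNSPECIFIED) {
            return requestLevel;
        }
        if (requestTaskSetting != null) {
            return requestTaskSetting;
        }
        return storedSetting;
    }
}
```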
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -21,9 +23,14 @@ public void testFromMap_ReturnsEmptySettings_IfMapEmpty() { assertThat(requestTaskSettings, is(GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS)); } + public void testFromMap_ReturnsEmptySettings_IfMapNull() { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(null); + assertThat(requestTaskSettings, is(GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS)); + } + public void testFromMap_DoesNotThrowValidationException_IfAutoTruncateIsMissing() { var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of("unrelated", true))); - assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null))); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null, null))); } public void testFromMap_ExtractsAutoTruncate() { @@ -31,6 +38,40 @@ public void testFromMap_ExtractsAutoTruncate() { var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, autoTruncate)) ); - assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate))); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate, null))); + } + + public void testFromMap_ThrowsValidationException_IfAutoTruncateIsInvalidValue() { + expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, "invalid")) + ) + ); + } + + public void testFromMap_ExtractsInputType() { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, InputType.INGEST.toString())) + ); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null, InputType.INGEST))); + } + + public void testFromMap_ThrowsValidationException_IfInputTypeIsInvalidValue() { + expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, "abc")) + ) + ); + } + + public void testFromMap_ThrowsValidationException_IfInputTypeIsUnspecified() { + expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, InputType.UNSPECIFIED.toString())) + ) + ); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java index 5b87bbc3c42c8..0a390b114702c 100644 --- 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java @@ -8,21 +8,30 @@ package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; +import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.inference.InputTypeTests.randomWithoutUnspecified; import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.VALID_REQUEST_VALUES; import static org.hamcrest.Matchers.is; public class GoogleVertexAiEmbeddingsTaskSettingsTests extends AbstractBWCWireSerializationTestCase { @@ -39,6 +48,9 @@ public void testUpdatedTaskSettings() { if (newSettings.autoTruncate() != null) { newSettingsMap.put(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, newSettings.autoTruncate()); } + if (newSettings.getInputType() != null) { + newSettingsMap.put(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, newSettings.getInputType().toString()); + } GoogleVertexAiEmbeddingsTaskSettings updatedSettings = (GoogleVertexAiEmbeddingsTaskSettings) initialSettings.updatedTaskSettings( Collections.unmodifiableMap(newSettingsMap) ); @@ -47,56 +59,144 @@ public void testUpdatedTaskSettings() { } else { assertEquals(newSettings.autoTruncate(), updatedSettings.autoTruncate()); } + if (newSettings.getInputType() == null) { + assertEquals(initialSettings.getInputType(), updatedSettings.getInputType()); + } else { + assertEquals(newSettings.getInputType(), updatedSettings.getInputType()); + } + } + + public void testFromMap_CreatesEmptySettings_WhenAllFieldsAreNull() { + MatcherAssert.assertThat( + GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()), + is(new GoogleVertexAiEmbeddingsTaskSettings(null, null)) + ); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).autoTruncate()); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).getInputType()); + } + + public void testFromMap_CreatesEmptySettings_WhenMapIsNull() { + MatcherAssert.assertThat( + GoogleVertexAiEmbeddingsTaskSettings.fromMap(null), + is(new GoogleVertexAiEmbeddingsTaskSettings(null, null)) + ); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(null).autoTruncate()); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(null).getInputType()); } public void 
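testUpdatedTaskSettings below fixes the merge rule for `updatedTaskSettings`: a key present in the update map replaces the stored value, an absent key preserves it, independently for `auto_truncate` and `input_type`. Reduced to a one-liner per field:

```java
import java.util.Map;

// Field-wise merge rule asserted by testUpdatedTaskSettings.
final class TaskSettingsMergeSketch {
    static Object mergedValue(Map<String, Object> update, String key, Object storedValue) {
        return update.containsKey(key) ? update.get(key) : storedValue;
    }
}
```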
testFromMap_AutoTruncateIsSet() { var autoTruncate = true; - var taskSettingsMap = getTaskSettingsMap(autoTruncate); + var taskSettingsMap = getTaskSettingsMap(autoTruncate, null); var taskSettings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettingsMap); - assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); } public void testFromMap_ThrowsValidationException_IfAutoTruncateIsInvalidValue() { - var taskSettings = getTaskSettingsMap("invalid"); + var taskSettings = getTaskSettingsMap("invalid", null); expectThrows(ValidationException.class, () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettings)); } public void testFromMap_AutoTruncateIsNull() { - var taskSettingsMap = getTaskSettingsMap(null); + var taskSettingsMap = getTaskSettingsMap(null, null); var taskSettings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettingsMap); // needed, because of constructors being ambiguous otherwise Boolean nullBoolean = null; - assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(nullBoolean))); + assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(nullBoolean, null))); } - public void testFromMap_DoesNotThrow_WithEmptyMap() { - assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).autoTruncate()); + public void testFromMap_ReturnsFailure_WhenInputTypeIsInvalid() { + var exception = expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, "abc")) + ) + ); + + assertThat( + exception.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [task_settings] Invalid value [abc] received. [input_type] must be one of [%s];", + getValidValuesSortedAndCombined(VALID_REQUEST_VALUES) + ) + ) + ); + } + + public void testFromMap_ReturnsFailure_WhenInputTypeIsUnspecified() { + var exception = expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, InputType.UNSPECIFIED.toString())) + ) + ); + + assertThat( + exception.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [task_settings] Invalid value [unspecified] received. 
[input_type] must be one of [%s];", + getValidValuesSortedAndCombined(VALID_REQUEST_VALUES) + ) + ) + ); } public void testOf_UseRequestSettings() { var originalAutoTruncate = true; - var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate); + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, null); var requestAutoTruncate = originalAutoTruncate == false; - var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate); + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate, null); - assertThat(GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings).autoTruncate(), is(requestAutoTruncate)); + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, null).autoTruncate(), + is(requestAutoTruncate) + ); + } + + public void testOf_UseRequestSettings_AndRequestInputType() { + var originalAutoTruncate = true; + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, InputType.SEARCH); + + var requestAutoTruncate = originalAutoTruncate == false; + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate, null); + + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, InputType.INGEST).getInputType(), + is(InputType.INGEST) + ); } public void testOf_UseOriginalSettings() { var originalAutoTruncate = true; - var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate); + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, null); - var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null); + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null, null); - assertThat(GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings).autoTruncate(), is(originalAutoTruncate)); + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, null).autoTruncate(), + is(originalAutoTruncate) + ); + } + + public void testOf_UseOriginalSettings_WithInputType() { + var originalAutoTruncate = true; + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, InputType.INGEST); + + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null, null); + + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, null).autoTruncate(), + is(originalAutoTruncate) + ); } public void testToXContent_WritesAutoTruncateIfNotNull() throws IOException { - var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true)); + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); settings.toXContent(builder, null); @@ -107,7 +207,7 @@ public void testToXContent_WritesAutoTruncateIfNotNull() throws IOException { } public void testToXContent_DoesNotWriteAutoTruncateIfNull() throws IOException { - var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(null)); + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(null, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); settings.toXContent(builder, null); @@ -117,6 +217,25 @@ public void testToXContent_DoesNotWriteAutoTruncateIfNull() throws 
IOException { {}""")); } + public void testToXContent_WritesInputTypeIfNotNull() throws IOException { + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true, InputType.INGEST)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + settings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"input_type":"ingest","auto_truncate":true}""")); + } + + public void testToXContent_ThrowsAssertionFailure_WhenInputTypeIsUnspecified() { + var thrownException = expectThrows( + AssertionError.class, + () -> new GoogleVertexAiEmbeddingsTaskSettings(false, InputType.UNSPECIFIED) + ); + assertThat(thrownException.getMessage(), is("received invalid input type value [unspecified]")); + } + @Override protected Writeable.Reader instanceReader() { return GoogleVertexAiEmbeddingsTaskSettings::new; @@ -137,20 +256,37 @@ protected GoogleVertexAiEmbeddingsTaskSettings mutateInstanceForVersion( GoogleVertexAiEmbeddingsTaskSettings instance, TransportVersion version ) { + if (version.before(TransportVersions.VERTEX_AI_INPUT_TYPE_ADDED)) { + // default to null input type if node is on a version before input type was introduced + return new GoogleVertexAiEmbeddingsTaskSettings(instance.autoTruncate(), null); + } return instance; } private static GoogleVertexAiEmbeddingsTaskSettings createRandom() { - return new GoogleVertexAiEmbeddingsTaskSettings(randomFrom(new Boolean[] { null, randomBoolean() })); + var inputType = randomBoolean() ? randomWithoutUnspecified() : null; + var autoTruncate = randomFrom(new Boolean[] { null, randomBoolean() }); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType); + } + + private static > String getValidValuesSortedAndCombined(EnumSet validValues) { + var validValuesAsStrings = validValues.stream().map(value -> value.toString().toLowerCase(Locale.ROOT)).toArray(String[]::new); + Arrays.sort(validValuesAsStrings); + + return String.join(", ", validValuesAsStrings); } - private static Map getTaskSettingsMap(@Nullable Object autoTruncate) { + public static Map getTaskSettingsMap(@Nullable Object autoTruncate, @Nullable InputType inputType) { var map = new HashMap(); if (autoTruncate != null) { map.put(AUTO_TRUNCATE, autoTruncate); } + if (inputType != null) { + map.put(INPUT_TYPE, inputType.toString()); + } + return map; } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/validation/SimpleServiceIntegrationValidatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/validation/SimpleServiceIntegrationValidatorTests.java index 767dd4d64a7d3..22ef35c3a46d3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/validation/SimpleServiceIntegrationValidatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/validation/SimpleServiceIntegrationValidatorTests.java @@ -71,8 +71,10 @@ public void testValidate_ServiceThrowsException() { any() ); - assertThrows(ElasticsearchStatusException.class, () -> { - underTest.validate(mockInferenceService, mockModel, mockActionListener);}); + assertThrows( + ElasticsearchStatusException.class, + () -> { underTest.validate(mockInferenceService, mockModel, mockActionListener); } + ); verifyCallToService(false); } diff --git a/x-pack/plugin/kql/build.gradle b/x-pack/plugin/kql/build.gradle index 7e4df5654f225..054011a458fe0 100644 --- 
a/x-pack/plugin/kql/build.gradle +++ b/x-pack/plugin/kql/build.gradle @@ -26,14 +26,14 @@ dependencies { testImplementation(testArtifact(project(xpackModule('core')))) } -tasks.named('yamlRestTest') { +tasks.named('yamlRestTest').configure { usesDefaultDistribution() -}.configure { + /**************************************************************** * Enable QA/rest integration tests for snapshot builds only * * TODO: Enable for all builds upon this feature release * ****************************************************************/ - enabled = BuildParams.isSnapshotBuild() + enabled = buildParams.isSnapshotBuild() } /********************************** diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 index da015b699cb15..739fa5eb0c6eb 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 +++ b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 @@ -46,9 +46,26 @@ notQuery: ; nestedQuery - : fieldName COLON LEFT_CURLY_BRACKET query RIGHT_CURLY_BRACKET + : fieldName COLON LEFT_CURLY_BRACKET nestedSubQuery RIGHT_CURLY_BRACKET ; +nestedSubQuery + : nestedSubQuery operator=(AND|OR) nestedSubQuery #booleanNestedQuery + | nestedSimpleSubQuery #defaultNestedQuery + ; + +nestedSimpleSubQuery + : notQuery + | nestedQuery + | matchAllQuery + | nestedParenthesizedQuery + | existsQuery + | rangeQuery + | fieldQuery; + +nestedParenthesizedQuery + : LEFT_PARENTHESIS nestedSubQuery RIGHT_PARENTHESIS; + matchAllQuery : (WILDCARD COLON)? WILDCARD ; diff --git a/x-pack/plugin/kql/src/main/java/module-info.java b/x-pack/plugin/kql/src/main/java/module-info.java index 41e51033b9c70..e3bb6fb99bbd3 100644 --- a/x-pack/plugin/kql/src/main/java/module-info.java +++ b/x-pack/plugin/kql/src/main/java/module-info.java @@ -13,6 +13,7 @@ requires org.apache.lucene.queryparser; requires org.elasticsearch.logging; requires org.apache.lucene.core; + requires org.apache.lucene.join; exports org.elasticsearch.xpack.kql; exports org.elasticsearch.xpack.kql.parser; diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java index 5fe3a61c0a761..2d810a33190ca 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java @@ -9,6 +9,7 @@ import org.antlr.v4.runtime.ParserRuleContext; import org.antlr.v4.runtime.Token; +import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -20,6 +21,7 @@ import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import java.util.List; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.BiFunction; @@ -56,15 +58,15 @@ public QueryBuilder toQueryBuilder(ParserRuleContext ctx) { @Override public QueryBuilder visitBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { assert ctx.operator != null; - return isAndQuery(ctx) ? visitAndBooleanQuery(ctx) : visitOrBooleanQuery(ctx); + return isAndQuery(ctx) ? 
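The grammar change gives the curly-bracket body its own rule chain (`nestedSubQuery`, `nestedSimpleSubQuery`, `nestedParenthesizedQuery`) instead of reusing the top-level `query`, which is what lets the AST builder below scope boolean and parenthesized logic to the nested context. Some KQL strings the new rules derive, kept as Java constants for illustration; the field names are invented:

```java
// Illustrative KQL accepted by the new nestedQuery rules (field names invented).
final class NestedKqlExamples {
    static final String SIMPLE        = "items:{ name:banana }";
    static final String BOOLEAN       = "items:{ name:banana AND stock > 10 }";
    static final String PARENTHESIZED = "items:{ (name:banana OR name:apple) AND stock > 10 }";
    static final String DOUBLY_NESTED = "items:{ sub_items:{ name:banana } }";
}
```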
visitAndBooleanQuery(ctx.query()) : visitOrBooleanQuery(ctx.query()); } - public QueryBuilder visitAndBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { + public QueryBuilder visitAndBooleanQuery(List<ParserRuleContext> clauses) { BoolQueryBuilder builder = QueryBuilders.boolQuery(); // TODO: KQLContext has an option to wrap the clauses into a filter instead of a must clause. Do we need it? - for (ParserRuleContext subQueryCtx : ctx.query()) { - if (subQueryCtx instanceof KqlBaseParser.BooleanQueryContext booleanSubQueryCtx && isAndQuery(booleanSubQueryCtx)) { + for (ParserRuleContext subQueryCtx : clauses) { + if (isAndQuery(subQueryCtx)) { typedParsing(this, subQueryCtx, BoolQueryBuilder.class).must().forEach(builder::must); } else { builder.must(typedParsing(this, subQueryCtx, QueryBuilder.class)); @@ -74,11 +76,11 @@ public QueryBuilder visitAndBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) return rewriteConjunctionQuery(builder); } - public QueryBuilder visitOrBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { + public QueryBuilder visitOrBooleanQuery(List<ParserRuleContext> clauses) { BoolQueryBuilder builder = QueryBuilders.boolQuery().minimumShouldMatch(1); - for (ParserRuleContext subQueryCtx : ctx.query()) { - if (subQueryCtx instanceof KqlBaseParser.BooleanQueryContext booleanSubQueryCtx && isOrQuery(booleanSubQueryCtx)) { + for (ParserRuleContext subQueryCtx : clauses) { + if (isOrQuery(subQueryCtx)) { typedParsing(this, subQueryCtx, BoolQueryBuilder.class).should().forEach(builder::should); } else { builder.should(typedParsing(this, subQueryCtx, QueryBuilder.class)); @@ -100,8 +102,40 @@ public QueryBuilder visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryCont @Override public QueryBuilder visitNestedQuery(KqlBaseParser.NestedQueryContext ctx) { - // TODO: implementation - return new MatchNoneQueryBuilder(); + String nestedFieldName = extractText(ctx.fieldName()); + + if (kqlParsingContext.isNestedField(nestedFieldName) == false) { + throw new KqlParsingException( + "[{}] is not a valid nested field name.", + ctx.start.getLine(), + ctx.start.getCharPositionInLine(), + nestedFieldName + ); + } + QueryBuilder subQuery = kqlParsingContext.withNestedPath( + nestedFieldName, + () -> typedParsing(this, ctx.nestedSubQuery(), QueryBuilder.class) + ); + + if (subQuery instanceof MatchNoneQueryBuilder) { + return subQuery; + } + + return wrapWithNestedQuery( + nestedFieldName, + QueryBuilders.nestedQuery(kqlParsingContext.fullFieldName(nestedFieldName), subQuery, ScoreMode.None) + ); + } + + @Override + public QueryBuilder visitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx) { + assert ctx.operator != null;
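Taken together, the grammar and builder changes above mean a KQL expression can now carry boolean operators and parentheses inside a nested scope. A sketch under assumed mappings: "user" and its sub-fields are hypothetical nested fields, and the per-clause match query is an approximation, though the single ScoreMode.None nested wrapper mirrors the code above (and is why module-info now requires org.apache.lucene.join):

import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

class NestedKqlSketch {
    // Illustrative inputs the new nestedSubQuery rules accept (field names invented):
    static final String[] ACCEPTED = {
        "user:{ first: \"john\" }",                      // nested syntax, unchanged
        "user:{ first: \"john\" AND last: \"smith\" }",  // booleanNestedQuery
        "user:{ (first: \"john\" OR first: \"jane\") }", // nestedParenthesizedQuery
        "user:{ address:{ city: \"paris\" } }",          // recursion via nestedSimpleSubQuery
    };

    // Approximate query tree for the AND example: both clauses share one
    // nested scope instead of each getting its own nested wrapper.
    static QueryBuilder expectedShape() {
        return QueryBuilders.nestedQuery(
            "user",
            QueryBuilders.boolQuery()
                .must(QueryBuilders.matchQuery("user.first", "john"))
                .must(QueryBuilders.matchQuery("user.last", "smith")),
            ScoreMode.None
        );
    }
}

+ return isAndQuery(ctx) ? 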
visitAndBooleanQuery(ctx.nestedSubQuery()) : visitOrBooleanQuery(ctx.nestedSubQuery()); + } + + @Override + public QueryBuilder visitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx) { + return typedParsing(this, ctx.nestedSubQuery(), QueryBuilder.class); } @Override @@ -116,7 +150,7 @@ public QueryBuilder visitExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery().minimumShouldMatch(1); withFields(ctx.fieldName(), (fieldName, mappedFieldType) -> { if (isRuntimeField(mappedFieldType) == false) { - boolQueryBuilder.should(QueryBuilders.existsQuery(fieldName)); + boolQueryBuilder.should(wrapWithNestedQuery(fieldName, QueryBuilders.existsQuery(fieldName))); } }); @@ -137,7 +171,7 @@ public QueryBuilder visitRangeQuery(KqlBaseParser.RangeQueryContext ctx) { rangeQuery.timeZone(kqlParsingContext.timeZone().getId()); } - boolQueryBuilder.should(rangeQuery); + boolQueryBuilder.should(wrapWithNestedQuery(fieldName, rangeQuery)); }); return rewriteDisjunctionQuery(boolQueryBuilder); @@ -200,24 +234,33 @@ public QueryBuilder visitFieldQuery(KqlBaseParser.FieldQueryContext ctx) { } if (fieldQuery != null) { - boolQueryBuilder.should(fieldQuery); + boolQueryBuilder.should(wrapWithNestedQuery(fieldName, fieldQuery)); } }); return rewriteDisjunctionQuery(boolQueryBuilder); } - private static boolean isAndQuery(KqlBaseParser.BooleanQueryContext ctx) { - return ctx.operator.getType() == KqlBaseParser.AND; + private static boolean isAndQuery(ParserRuleContext ctx) { + return switch (ctx) { + case KqlBaseParser.BooleanQueryContext booleanQueryCtx -> booleanQueryCtx.operator.getType() == KqlBaseParser.AND; + case KqlBaseParser.BooleanNestedQueryContext booleanNestedCtx -> booleanNestedCtx.operator.getType() == KqlBaseParser.AND; + default -> false; + }; } - private static boolean isOrQuery(KqlBaseParser.BooleanQueryContext ctx) { - return ctx.operator.getType() == KqlBaseParser.OR; + private static boolean isOrQuery(ParserRuleContext ctx) { + return switch (ctx) { + case KqlBaseParser.BooleanQueryContext booleanQueryCtx -> booleanQueryCtx.operator.getType() == KqlBaseParser.OR; + case KqlBaseParser.BooleanNestedQueryContext booleanNestedCtx -> booleanNestedCtx.operator.getType() == KqlBaseParser.OR; + default -> false; + }; } private void withFields(KqlBaseParser.FieldNameContext ctx, BiConsumer<String, MappedFieldType> fieldConsummer) { assert ctx != null : "Field ctx cannot be null"; String fieldNamePattern = extractText(ctx); + Set<String> fieldNames = kqlParsingContext.resolveFieldNames(fieldNamePattern); if (ctx.value.getType() == KqlBaseParser.QUOTED_STRING && Regex.isSimpleMatchPattern(fieldNamePattern)) { @@ -267,4 +310,14 @@ private BiFunction rangeOperation( default -> throw new IllegalArgumentException(format(null, "Invalid range operator {}\"", operator.getText())); }; } + + private QueryBuilder wrapWithNestedQuery(String fieldName, QueryBuilder query) { + String nestedPath = kqlParsingContext.nestedPath(fieldName); + + if (nestedPath == null || nestedPath.equals(kqlParsingContext.currentNestedPath())) { + return query; + } + + return wrapWithNestedQuery(nestedPath, QueryBuilders.nestedQuery(nestedPath, query, ScoreMode.None)); + } } diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp index 7af37d7e3c3b5..fbfe52afa4cd5 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp +++ 
b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp @@ -42,6 +42,9 @@ query simpleQuery notQuery nestedQuery +nestedSubQuery +nestedSimpleSubQuery +nestedParenthesizedQuery matchAllQuery parenthesizedQuery rangeQuery @@ -54,4 +57,4 @@ fieldName atn: -[4, 1, 16, 136, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 1, 0, 3, 0, 30, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 40, 8, 1, 10, 1, 12, 1, 43, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 3, 5, 66, 8, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 4, 8, 79, 8, 8, 11, 8, 12, 8, 80, 1, 8, 3, 8, 84, 8, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 100, 8, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 3, 11, 107, 8, 11, 1, 12, 3, 12, 110, 8, 12, 1, 12, 4, 12, 113, 8, 12, 11, 12, 12, 12, 114, 1, 12, 3, 12, 118, 8, 12, 1, 12, 1, 12, 3, 12, 122, 8, 12, 1, 12, 1, 12, 3, 12, 126, 8, 12, 1, 12, 3, 12, 129, 8, 12, 1, 13, 1, 13, 1, 13, 3, 13, 134, 8, 13, 1, 13, 0, 1, 2, 14, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 0, 4, 1, 0, 2, 3, 1, 0, 6, 9, 2, 0, 14, 14, 16, 16, 1, 0, 2, 4, 145, 0, 29, 1, 0, 0, 0, 2, 33, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 54, 1, 0, 0, 0, 8, 57, 1, 0, 0, 0, 10, 65, 1, 0, 0, 0, 12, 69, 1, 0, 0, 0, 14, 73, 1, 0, 0, 0, 16, 83, 1, 0, 0, 0, 18, 85, 1, 0, 0, 0, 20, 99, 1, 0, 0, 0, 22, 106, 1, 0, 0, 0, 24, 128, 1, 0, 0, 0, 26, 133, 1, 0, 0, 0, 28, 30, 3, 2, 1, 0, 29, 28, 1, 0, 0, 0, 29, 30, 1, 0, 0, 0, 30, 31, 1, 0, 0, 0, 31, 32, 5, 0, 0, 1, 32, 1, 1, 0, 0, 0, 33, 34, 6, 1, -1, 0, 34, 35, 3, 4, 2, 0, 35, 41, 1, 0, 0, 0, 36, 37, 10, 2, 0, 0, 37, 38, 7, 0, 0, 0, 38, 40, 3, 2, 1, 2, 39, 36, 1, 0, 0, 0, 40, 43, 1, 0, 0, 0, 41, 39, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 41, 1, 0, 0, 0, 44, 53, 3, 6, 3, 0, 45, 53, 3, 8, 4, 0, 46, 53, 3, 12, 6, 0, 47, 53, 3, 10, 5, 0, 48, 53, 3, 18, 9, 0, 49, 53, 3, 14, 7, 0, 50, 53, 3, 20, 10, 0, 51, 53, 3, 22, 11, 0, 52, 44, 1, 0, 0, 0, 52, 45, 1, 0, 0, 0, 52, 46, 1, 0, 0, 0, 52, 47, 1, 0, 0, 0, 52, 48, 1, 0, 0, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 55, 5, 4, 0, 0, 55, 56, 3, 4, 2, 0, 56, 7, 1, 0, 0, 0, 57, 58, 3, 26, 13, 0, 58, 59, 5, 5, 0, 0, 59, 60, 5, 12, 0, 0, 60, 61, 3, 2, 1, 0, 61, 62, 5, 13, 0, 0, 62, 9, 1, 0, 0, 0, 63, 64, 5, 16, 0, 0, 64, 66, 5, 5, 0, 0, 65, 63, 1, 0, 0, 0, 65, 66, 1, 0, 0, 0, 66, 67, 1, 0, 0, 0, 67, 68, 5, 16, 0, 0, 68, 11, 1, 0, 0, 0, 69, 70, 5, 10, 0, 0, 70, 71, 3, 2, 1, 0, 71, 72, 5, 11, 0, 0, 72, 13, 1, 0, 0, 0, 73, 74, 3, 26, 13, 0, 74, 75, 7, 1, 0, 0, 75, 76, 3, 16, 8, 0, 76, 15, 1, 0, 0, 0, 77, 79, 7, 2, 0, 0, 78, 77, 1, 0, 0, 0, 79, 80, 1, 0, 0, 0, 80, 78, 1, 0, 0, 0, 80, 81, 1, 0, 0, 0, 81, 84, 1, 0, 0, 0, 82, 84, 5, 15, 0, 0, 83, 78, 1, 0, 0, 0, 83, 82, 1, 0, 0, 0, 84, 17, 1, 0, 0, 0, 85, 86, 3, 26, 13, 0, 86, 87, 5, 5, 0, 0, 87, 88, 5, 16, 0, 0, 88, 19, 1, 0, 0, 0, 89, 90, 3, 26, 13, 0, 90, 91, 5, 5, 0, 0, 91, 92, 3, 24, 12, 0, 92, 100, 1, 0, 0, 0, 93, 94, 3, 26, 13, 0, 94, 95, 5, 5, 0, 0, 95, 96, 5, 10, 0, 0, 96, 97, 3, 24, 12, 0, 97, 98, 5, 11, 0, 0, 98, 100, 1, 0, 0, 0, 99, 89, 1, 0, 0, 0, 99, 93, 1, 0, 0, 0, 100, 21, 1, 0, 0, 0, 101, 107, 3, 24, 12, 0, 102, 103, 5, 10, 0, 0, 103, 104, 3, 24, 12, 0, 104, 105, 5, 11, 0, 0, 105, 107, 1, 0, 0, 0, 106, 101, 1, 0, 
0, 0, 106, 102, 1, 0, 0, 0, 107, 23, 1, 0, 0, 0, 108, 110, 7, 3, 0, 0, 109, 108, 1, 0, 0, 0, 109, 110, 1, 0, 0, 0, 110, 112, 1, 0, 0, 0, 111, 113, 7, 2, 0, 0, 112, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 112, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 117, 1, 0, 0, 0, 116, 118, 7, 3, 0, 0, 117, 116, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 129, 1, 0, 0, 0, 119, 121, 7, 0, 0, 0, 120, 122, 7, 3, 0, 0, 121, 120, 1, 0, 0, 0, 121, 122, 1, 0, 0, 0, 122, 129, 1, 0, 0, 0, 123, 125, 5, 4, 0, 0, 124, 126, 7, 0, 0, 0, 125, 124, 1, 0, 0, 0, 125, 126, 1, 0, 0, 0, 126, 129, 1, 0, 0, 0, 127, 129, 5, 15, 0, 0, 128, 109, 1, 0, 0, 0, 128, 119, 1, 0, 0, 0, 128, 123, 1, 0, 0, 0, 128, 127, 1, 0, 0, 0, 129, 25, 1, 0, 0, 0, 130, 134, 5, 14, 0, 0, 131, 134, 5, 15, 0, 0, 132, 134, 5, 16, 0, 0, 133, 130, 1, 0, 0, 0, 133, 131, 1, 0, 0, 0, 133, 132, 1, 0, 0, 0, 134, 27, 1, 0, 0, 0, 15, 29, 41, 52, 65, 80, 83, 99, 106, 109, 114, 117, 121, 125, 128, 133] \ No newline at end of file +[4, 1, 16, 165, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 1, 0, 3, 0, 36, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 46, 8, 1, 10, 1, 12, 1, 49, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 59, 8, 2, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 76, 8, 5, 10, 5, 12, 5, 79, 9, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 3, 6, 87, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 3, 8, 95, 8, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 4, 11, 108, 8, 11, 11, 11, 12, 11, 109, 1, 11, 3, 11, 113, 8, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 3, 13, 129, 8, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 3, 14, 136, 8, 14, 1, 15, 3, 15, 139, 8, 15, 1, 15, 4, 15, 142, 8, 15, 11, 15, 12, 15, 143, 1, 15, 3, 15, 147, 8, 15, 1, 15, 1, 15, 3, 15, 151, 8, 15, 1, 15, 1, 15, 3, 15, 155, 8, 15, 1, 15, 3, 15, 158, 8, 15, 1, 16, 1, 16, 1, 16, 3, 16, 163, 8, 16, 1, 16, 0, 2, 2, 10, 17, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 0, 4, 1, 0, 2, 3, 1, 0, 6, 9, 2, 0, 14, 14, 16, 16, 1, 0, 2, 4, 177, 0, 35, 1, 0, 0, 0, 2, 39, 1, 0, 0, 0, 4, 58, 1, 0, 0, 0, 6, 60, 1, 0, 0, 0, 8, 63, 1, 0, 0, 0, 10, 69, 1, 0, 0, 0, 12, 86, 1, 0, 0, 0, 14, 88, 1, 0, 0, 0, 16, 94, 1, 0, 0, 0, 18, 98, 1, 0, 0, 0, 20, 102, 1, 0, 0, 0, 22, 112, 1, 0, 0, 0, 24, 114, 1, 0, 0, 0, 26, 128, 1, 0, 0, 0, 28, 135, 1, 0, 0, 0, 30, 157, 1, 0, 0, 0, 32, 162, 1, 0, 0, 0, 34, 36, 3, 2, 1, 0, 35, 34, 1, 0, 0, 0, 35, 36, 1, 0, 0, 0, 36, 37, 1, 0, 0, 0, 37, 38, 5, 0, 0, 1, 38, 1, 1, 0, 0, 0, 39, 40, 6, 1, -1, 0, 40, 41, 3, 4, 2, 0, 41, 47, 1, 0, 0, 0, 42, 43, 10, 2, 0, 0, 43, 44, 7, 0, 0, 0, 44, 46, 3, 2, 1, 2, 45, 42, 1, 0, 0, 0, 46, 49, 1, 0, 0, 0, 47, 45, 1, 0, 0, 0, 47, 48, 1, 0, 0, 0, 48, 3, 1, 0, 0, 0, 49, 47, 1, 0, 0, 0, 50, 59, 3, 6, 3, 0, 51, 59, 3, 8, 4, 0, 52, 59, 3, 18, 9, 0, 53, 59, 3, 16, 8, 0, 54, 59, 3, 24, 12, 0, 55, 59, 3, 20, 10, 0, 56, 59, 3, 26, 13, 0, 57, 59, 3, 28, 14, 0, 58, 50, 1, 0, 0, 0, 58, 51, 1, 0, 0, 0, 58, 52, 1, 0, 0, 0, 58, 53, 1, 0, 0, 0, 58, 54, 1, 0, 0, 0, 58, 55, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 58, 57, 1, 0, 0, 0, 59, 5, 1, 0, 0, 0, 60, 61, 5, 4, 0, 0, 61, 62, 3, 4, 2, 0, 62, 7, 1, 0, 0, 0, 63, 64, 3, 32, 16, 0, 64, 65, 5, 5, 0, 0, 65, 66, 5, 12, 0, 0, 66, 67, 3, 10, 5, 0, 67, 68, 5, 13, 0, 0, 68, 9, 1, 0, 0, 0, 69, 70, 6, 5, -1, 
0, 70, 71, 3, 12, 6, 0, 71, 77, 1, 0, 0, 0, 72, 73, 10, 2, 0, 0, 73, 74, 7, 0, 0, 0, 74, 76, 3, 10, 5, 2, 75, 72, 1, 0, 0, 0, 76, 79, 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 77, 78, 1, 0, 0, 0, 78, 11, 1, 0, 0, 0, 79, 77, 1, 0, 0, 0, 80, 87, 3, 6, 3, 0, 81, 87, 3, 8, 4, 0, 82, 87, 3, 14, 7, 0, 83, 87, 3, 24, 12, 0, 84, 87, 3, 20, 10, 0, 85, 87, 3, 26, 13, 0, 86, 80, 1, 0, 0, 0, 86, 81, 1, 0, 0, 0, 86, 82, 1, 0, 0, 0, 86, 83, 1, 0, 0, 0, 86, 84, 1, 0, 0, 0, 86, 85, 1, 0, 0, 0, 87, 13, 1, 0, 0, 0, 88, 89, 5, 10, 0, 0, 89, 90, 3, 10, 5, 0, 90, 91, 5, 11, 0, 0, 91, 15, 1, 0, 0, 0, 92, 93, 5, 16, 0, 0, 93, 95, 5, 5, 0, 0, 94, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 97, 5, 16, 0, 0, 97, 17, 1, 0, 0, 0, 98, 99, 5, 10, 0, 0, 99, 100, 3, 2, 1, 0, 100, 101, 5, 11, 0, 0, 101, 19, 1, 0, 0, 0, 102, 103, 3, 32, 16, 0, 103, 104, 7, 1, 0, 0, 104, 105, 3, 22, 11, 0, 105, 21, 1, 0, 0, 0, 106, 108, 7, 2, 0, 0, 107, 106, 1, 0, 0, 0, 108, 109, 1, 0, 0, 0, 109, 107, 1, 0, 0, 0, 109, 110, 1, 0, 0, 0, 110, 113, 1, 0, 0, 0, 111, 113, 5, 15, 0, 0, 112, 107, 1, 0, 0, 0, 112, 111, 1, 0, 0, 0, 113, 23, 1, 0, 0, 0, 114, 115, 3, 32, 16, 0, 115, 116, 5, 5, 0, 0, 116, 117, 5, 16, 0, 0, 117, 25, 1, 0, 0, 0, 118, 119, 3, 32, 16, 0, 119, 120, 5, 5, 0, 0, 120, 121, 3, 30, 15, 0, 121, 129, 1, 0, 0, 0, 122, 123, 3, 32, 16, 0, 123, 124, 5, 5, 0, 0, 124, 125, 5, 10, 0, 0, 125, 126, 3, 30, 15, 0, 126, 127, 5, 11, 0, 0, 127, 129, 1, 0, 0, 0, 128, 118, 1, 0, 0, 0, 128, 122, 1, 0, 0, 0, 129, 27, 1, 0, 0, 0, 130, 136, 3, 30, 15, 0, 131, 132, 5, 10, 0, 0, 132, 133, 3, 30, 15, 0, 133, 134, 5, 11, 0, 0, 134, 136, 1, 0, 0, 0, 135, 130, 1, 0, 0, 0, 135, 131, 1, 0, 0, 0, 136, 29, 1, 0, 0, 0, 137, 139, 7, 3, 0, 0, 138, 137, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 141, 1, 0, 0, 0, 140, 142, 7, 2, 0, 0, 141, 140, 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 141, 1, 0, 0, 0, 143, 144, 1, 0, 0, 0, 144, 146, 1, 0, 0, 0, 145, 147, 7, 3, 0, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 158, 1, 0, 0, 0, 148, 150, 7, 0, 0, 0, 149, 151, 7, 3, 0, 0, 150, 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 158, 1, 0, 0, 0, 152, 154, 5, 4, 0, 0, 153, 155, 7, 0, 0, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 158, 1, 0, 0, 0, 156, 158, 5, 15, 0, 0, 157, 138, 1, 0, 0, 0, 157, 148, 1, 0, 0, 0, 157, 152, 1, 0, 0, 0, 157, 156, 1, 0, 0, 0, 158, 31, 1, 0, 0, 0, 159, 163, 5, 14, 0, 0, 160, 163, 5, 15, 0, 0, 161, 163, 5, 16, 0, 0, 162, 159, 1, 0, 0, 0, 162, 160, 1, 0, 0, 0, 162, 161, 1, 0, 0, 0, 163, 33, 1, 0, 0, 0, 17, 35, 47, 58, 77, 86, 94, 109, 112, 128, 135, 138, 143, 146, 150, 154, 157, 162] \ No newline at end of file diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java index e1015edcd4931..c3fc1281b6fd9 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java @@ -92,6 +92,54 @@ class KqlBaseBaseListener implements KqlBaseListener { *
<p>The default implementation does nothing.</p>
*/ @Override public void exitNestedQuery(KqlBaseParser.NestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void enterBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void exitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void enterDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void exitDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void enterNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void exitNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void enterNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void exitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx) { } /** * {@inheritDoc} * diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java index 3973a647c8cd8..84c882c2e2bcf 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java @@ -62,6 +62,34 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa * {@link #visitChildren} on {@code ctx}.
</p>
*/ @Override public T visitNestedQuery(KqlBaseParser.NestedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
+ */ + @Override public T visitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
+ */ + @Override public T visitDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
+ */ + @Override public T visitNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
+ */ + @Override public T visitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java index 49f2031208642..a44ecf1ecad23 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java @@ -79,6 +79,50 @@ interface KqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitNestedQuery(KqlBaseParser.NestedQueryContext ctx); + /** + * Enter a parse tree produced by the {@code booleanNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + */ + void enterBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx); + /** + * Exit a parse tree produced by the {@code booleanNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + */ + void exitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx); + /** + * Enter a parse tree produced by the {@code defaultNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + */ + void enterDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx); + /** + * Exit a parse tree produced by the {@code defaultNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + */ + void exitDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx); + /** + * Enter a parse tree produced by {@link KqlBaseParser#nestedSimpleSubQuery}. + * @param ctx the parse tree + */ + void enterNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx); + /** + * Exit a parse tree produced by {@link KqlBaseParser#nestedSimpleSubQuery}. + * @param ctx the parse tree + */ + void exitNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx); + /** + * Enter a parse tree produced by {@link KqlBaseParser#nestedParenthesizedQuery}. + * @param ctx the parse tree + */ + void enterNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx); + /** + * Exit a parse tree produced by {@link KqlBaseParser#nestedParenthesizedQuery}. + * @param ctx the parse tree + */ + void exitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx);
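As a usage sketch for the listener callbacks declared above (the subclass and its counter are hypothetical; only the base class and context types come from the generated code):

// Counts AND/OR operators occurring inside nested scopes; a tree produced
// by KqlBaseParser can be walked with ANTLR's ParseTreeWalker to drive it.
class NestedBooleanCounter extends KqlBaseBaseListener {
    int booleanNestedClauses = 0;

    @Override
    public void enterBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx) {
        booleanNestedClauses++; // fires once per boolean operator in a nested scope
    }
}

/** * Enter a parse tree produced by {@link KqlBaseParser#matchAllQuery}. 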
* @param ctx the parse tree diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java index 118ac32aadd61..7e797b9edbb93 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java @@ -30,12 +30,15 @@ class KqlBaseParser extends Parser { RIGHT_CURLY_BRACKET=13, UNQUOTED_LITERAL=14, QUOTED_STRING=15, WILDCARD=16; public static final int RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_notQuery = 3, - RULE_nestedQuery = 4, RULE_matchAllQuery = 5, RULE_parenthesizedQuery = 6, - RULE_rangeQuery = 7, RULE_rangeQueryValue = 8, RULE_existsQuery = 9, RULE_fieldQuery = 10, - RULE_fieldLessQuery = 11, RULE_fieldQueryValue = 12, RULE_fieldName = 13; + RULE_nestedQuery = 4, RULE_nestedSubQuery = 5, RULE_nestedSimpleSubQuery = 6, + RULE_nestedParenthesizedQuery = 7, RULE_matchAllQuery = 8, RULE_parenthesizedQuery = 9, + RULE_rangeQuery = 10, RULE_rangeQueryValue = 11, RULE_existsQuery = 12, + RULE_fieldQuery = 13, RULE_fieldLessQuery = 14, RULE_fieldQueryValue = 15, + RULE_fieldName = 16; private static String[] makeRuleNames() { return new String[] { - "topLevelQuery", "query", "simpleQuery", "notQuery", "nestedQuery", "matchAllQuery", + "topLevelQuery", "query", "simpleQuery", "notQuery", "nestedQuery", "nestedSubQuery", + "nestedSimpleSubQuery", "nestedParenthesizedQuery", "matchAllQuery", "parenthesizedQuery", "rangeQuery", "rangeQueryValue", "existsQuery", "fieldQuery", "fieldLessQuery", "fieldQueryValue", "fieldName" }; @@ -139,17 +142,17 @@ public final TopLevelQueryContext topLevelQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(29); + setState(35); _errHandler.sync(this); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 115740L) != 0)) { { - setState(28); + setState(34); query(0); } } - setState(31); + setState(37); match(EOF); } } @@ -244,11 +247,11 @@ private QueryContext query(int _p) throws RecognitionException { _ctx = _localctx; _prevctx = _localctx; - setState(34); + setState(40); simpleQuery(); } _ctx.stop = _input.LT(-1); - setState(41); + setState(47); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,1,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -259,9 +262,9 @@ private QueryContext query(int _p) throws RecognitionException { { _localctx = new BooleanQueryContext(new QueryContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_query); - setState(36); + setState(42); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(37); + setState(43); ((BooleanQueryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { @@ -272,12 +275,12 @@ private QueryContext query(int _p) throws RecognitionException { _errHandler.reportMatch(this); consume(); } - setState(38); + setState(44); query(2); } } } - setState(43); + setState(49); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,1,_ctx); } @@ -343,62 +346,62 @@ public final SimpleQueryContext simpleQuery() throws RecognitionException { SimpleQueryContext _localctx = new SimpleQueryContext(_ctx, getState()); enterRule(_localctx, 4, RULE_simpleQuery); try { - setState(52); + setState(58); _errHandler.sync(this); switch ( 
getInterpreter().adaptivePredict(_input,2,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(44); + setState(50); notQuery(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(45); + setState(51); nestedQuery(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(46); + setState(52); parenthesizedQuery(); } break; case 4: enterOuterAlt(_localctx, 4); { - setState(47); + setState(53); matchAllQuery(); } break; case 5: enterOuterAlt(_localctx, 5); { - setState(48); + setState(54); existsQuery(); } break; case 6: enterOuterAlt(_localctx, 6); { - setState(49); + setState(55); rangeQuery(); } break; case 7: enterOuterAlt(_localctx, 7); { - setState(50); + setState(56); fieldQuery(); } break; case 8: enterOuterAlt(_localctx, 8); { - setState(51); + setState(57); fieldLessQuery(); } break; @@ -447,9 +450,9 @@ public final NotQueryContext notQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(54); + setState(60); match(NOT); - setState(55); + setState(61); ((NotQueryContext)_localctx).subQuery = simpleQuery(); } } @@ -471,8 +474,8 @@ public FieldNameContext fieldName() { } public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } public TerminalNode LEFT_CURLY_BRACKET() { return getToken(KqlBaseParser.LEFT_CURLY_BRACKET, 0); } - public QueryContext query() { - return getRuleContext(QueryContext.class,0); + public NestedSubQueryContext nestedSubQuery() { + return getRuleContext(NestedSubQueryContext.class,0); } public TerminalNode RIGHT_CURLY_BRACKET() { return getToken(KqlBaseParser.RIGHT_CURLY_BRACKET, 0); } public NestedQueryContext(ParserRuleContext parent, int invokingState) { @@ -500,15 +503,15 @@ public final NestedQueryContext nestedQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(57); + setState(63); fieldName(); - setState(58); + setState(64); match(COLON); - setState(59); + setState(65); match(LEFT_CURLY_BRACKET); - setState(60); - query(0); - setState(61); + setState(66); + nestedSubQuery(0); + setState(67); match(RIGHT_CURLY_BRACKET); } } @@ -523,6 +526,288 @@ public final NestedQueryContext nestedQuery() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") + public static class NestedSubQueryContext extends ParserRuleContext { + public NestedSubQueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_nestedSubQuery; } + + public NestedSubQueryContext() { } + public void copyFrom(NestedSubQueryContext ctx) { + super.copyFrom(ctx); + } + } + @SuppressWarnings("CheckReturnValue") + public static class BooleanNestedQueryContext extends NestedSubQueryContext { + public Token operator; + public List nestedSubQuery() { + return getRuleContexts(NestedSubQueryContext.class); + } + public NestedSubQueryContext nestedSubQuery(int i) { + return getRuleContext(NestedSubQueryContext.class,i); + } + public TerminalNode AND() { return getToken(KqlBaseParser.AND, 0); } + public TerminalNode OR() { return getToken(KqlBaseParser.OR, 0); } + public BooleanNestedQueryContext(NestedSubQueryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterBooleanNestedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitBooleanNestedQuery(this); + } + @Override 
+ public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitBooleanNestedQuery(this); + else return visitor.visitChildren(this); + } + } + @SuppressWarnings("CheckReturnValue") + public static class DefaultNestedQueryContext extends NestedSubQueryContext { + public NestedSimpleSubQueryContext nestedSimpleSubQuery() { + return getRuleContext(NestedSimpleSubQueryContext.class,0); + } + public DefaultNestedQueryContext(NestedSubQueryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterDefaultNestedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitDefaultNestedQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitDefaultNestedQuery(this); + else return visitor.visitChildren(this); + } + } + + public final NestedSubQueryContext nestedSubQuery() throws RecognitionException { + return nestedSubQuery(0); + } + + private NestedSubQueryContext nestedSubQuery(int _p) throws RecognitionException { + ParserRuleContext _parentctx = _ctx; + int _parentState = getState(); + NestedSubQueryContext _localctx = new NestedSubQueryContext(_ctx, _parentState); + NestedSubQueryContext _prevctx = _localctx; + int _startState = 10; + enterRecursionRule(_localctx, 10, RULE_nestedSubQuery, _p); + int _la; + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + { + _localctx = new DefaultNestedQueryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + + setState(70); + nestedSimpleSubQuery(); + } + _ctx.stop = _input.LT(-1); + setState(77); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,3,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + if ( _parseListeners!=null ) triggerExitRuleEvent(); + _prevctx = _localctx; + { + { + _localctx = new BooleanNestedQueryContext(new NestedSubQueryContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_nestedSubQuery); + setState(72); + if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); + setState(73); + ((BooleanNestedQueryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + ((BooleanNestedQueryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + setState(74); + nestedSubQuery(2); + } + } + } + setState(79); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,3,_ctx); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + unrollRecursionContexts(_parentctx); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class NestedSimpleSubQueryContext extends ParserRuleContext { + public NotQueryContext notQuery() { + return getRuleContext(NotQueryContext.class,0); + } + public NestedQueryContext nestedQuery() { + return getRuleContext(NestedQueryContext.class,0); + } + public NestedParenthesizedQueryContext nestedParenthesizedQuery() { + return 
getRuleContext(NestedParenthesizedQueryContext.class,0); + } + public ExistsQueryContext existsQuery() { + return getRuleContext(ExistsQueryContext.class,0); + } + public RangeQueryContext rangeQuery() { + return getRuleContext(RangeQueryContext.class,0); + } + public FieldQueryContext fieldQuery() { + return getRuleContext(FieldQueryContext.class,0); + } + public NestedSimpleSubQueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_nestedSimpleSubQuery; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterNestedSimpleSubQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitNestedSimpleSubQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitNestedSimpleSubQuery(this); + else return visitor.visitChildren(this); + } + } + + public final NestedSimpleSubQueryContext nestedSimpleSubQuery() throws RecognitionException { + NestedSimpleSubQueryContext _localctx = new NestedSimpleSubQueryContext(_ctx, getState()); + enterRule(_localctx, 12, RULE_nestedSimpleSubQuery); + try { + setState(86); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(80); + notQuery(); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(81); + nestedQuery(); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(82); + nestedParenthesizedQuery(); + } + break; + case 4: + enterOuterAlt(_localctx, 4); + { + setState(83); + existsQuery(); + } + break; + case 5: + enterOuterAlt(_localctx, 5); + { + setState(84); + rangeQuery(); + } + break; + case 6: + enterOuterAlt(_localctx, 6); + { + setState(85); + fieldQuery(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class NestedParenthesizedQueryContext extends ParserRuleContext { + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } + public NestedSubQueryContext nestedSubQuery() { + return getRuleContext(NestedSubQueryContext.class,0); + } + public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } + public NestedParenthesizedQueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_nestedParenthesizedQuery; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterNestedParenthesizedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitNestedParenthesizedQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitNestedParenthesizedQuery(this); + else return visitor.visitChildren(this); + } + } + + public final NestedParenthesizedQueryContext nestedParenthesizedQuery() throws RecognitionException { 
+ NestedParenthesizedQueryContext _localctx = new NestedParenthesizedQueryContext(_ctx, getState()); + enterRule(_localctx, 14, RULE_nestedParenthesizedQuery); + try { + enterOuterAlt(_localctx, 1); + { + setState(88); + match(LEFT_PARENTHESIS); + setState(89); + nestedSubQuery(0); + setState(90); + match(RIGHT_PARENTHESIS); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + @SuppressWarnings("CheckReturnValue") public static class MatchAllQueryContext extends ParserRuleContext { public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } @@ -551,23 +836,23 @@ public T accept(ParseTreeVisitor visitor) { public final MatchAllQueryContext matchAllQuery() throws RecognitionException { MatchAllQueryContext _localctx = new MatchAllQueryContext(_ctx, getState()); - enterRule(_localctx, 10, RULE_matchAllQuery); + enterRule(_localctx, 16, RULE_matchAllQuery); try { enterOuterAlt(_localctx, 1); { - setState(65); + setState(94); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,3,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { case 1: { - setState(63); + setState(92); match(WILDCARD); - setState(64); + setState(93); match(COLON); } break; } - setState(67); + setState(96); match(WILDCARD); } } @@ -610,15 +895,15 @@ public T accept(ParseTreeVisitor visitor) { public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionException { ParenthesizedQueryContext _localctx = new ParenthesizedQueryContext(_ctx, getState()); - enterRule(_localctx, 12, RULE_parenthesizedQuery); + enterRule(_localctx, 18, RULE_parenthesizedQuery); try { enterOuterAlt(_localctx, 1); { - setState(69); + setState(98); match(LEFT_PARENTHESIS); - setState(70); + setState(99); query(0); - setState(71); + setState(100); match(RIGHT_PARENTHESIS); } } @@ -667,14 +952,14 @@ public T accept(ParseTreeVisitor visitor) { public final RangeQueryContext rangeQuery() throws RecognitionException { RangeQueryContext _localctx = new RangeQueryContext(_ctx, getState()); - enterRule(_localctx, 14, RULE_rangeQuery); + enterRule(_localctx, 20, RULE_rangeQuery); int _la; try { enterOuterAlt(_localctx, 1); { - setState(73); + setState(102); fieldName(); - setState(74); + setState(103); ((RangeQueryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 960L) != 0)) ) { @@ -685,7 +970,7 @@ public final RangeQueryContext rangeQuery() throws RecognitionException { _errHandler.reportMatch(this); consume(); } - setState(75); + setState(104); rangeQueryValue(); } } @@ -732,18 +1017,18 @@ public T accept(ParseTreeVisitor visitor) { public final RangeQueryValueContext rangeQueryValue() throws RecognitionException { RangeQueryValueContext _localctx = new RangeQueryValueContext(_ctx, getState()); - enterRule(_localctx, 16, RULE_rangeQueryValue); + enterRule(_localctx, 22, RULE_rangeQueryValue); int _la; try { int _alt; - setState(83); + setState(112); _errHandler.sync(this); switch (_input.LA(1)) { case UNQUOTED_LITERAL: case WILDCARD: enterOuterAlt(_localctx, 1); { - setState(78); + setState(107); _errHandler.sync(this); _alt = 1; do { @@ -751,7 +1036,7 @@ public final RangeQueryValueContext rangeQueryValue() throws RecognitionExceptio case 1: { { - setState(77); + setState(106); _la = _input.LA(1); if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { _errHandler.recoverInline(this); @@ 
-767,16 +1052,16 @@ public final RangeQueryValueContext rangeQueryValue() throws RecognitionExceptio default: throw new NoViableAltException(this); } - setState(80); + setState(109); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,4,_ctx); + _alt = getInterpreter().adaptivePredict(_input,6,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; case QUOTED_STRING: enterOuterAlt(_localctx, 2); { - setState(82); + setState(111); match(QUOTED_STRING); } break; @@ -823,15 +1108,15 @@ public T accept(ParseTreeVisitor visitor) { public final ExistsQueryContext existsQuery() throws RecognitionException { ExistsQueryContext _localctx = new ExistsQueryContext(_ctx, getState()); - enterRule(_localctx, 18, RULE_existsQuery); + enterRule(_localctx, 24, RULE_existsQuery); try { enterOuterAlt(_localctx, 1); { - setState(85); + setState(114); fieldName(); - setState(86); + setState(115); match(COLON); - setState(87); + setState(116); match(WILDCARD); } } @@ -878,34 +1163,34 @@ public T accept(ParseTreeVisitor visitor) { public final FieldQueryContext fieldQuery() throws RecognitionException { FieldQueryContext _localctx = new FieldQueryContext(_ctx, getState()); - enterRule(_localctx, 20, RULE_fieldQuery); + enterRule(_localctx, 26, RULE_fieldQuery); try { - setState(99); + setState(128); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,8,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(89); + setState(118); fieldName(); - setState(90); + setState(119); match(COLON); - setState(91); + setState(120); fieldQueryValue(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(93); + setState(122); fieldName(); - setState(94); + setState(123); match(COLON); - setState(95); + setState(124); match(LEFT_PARENTHESIS); - setState(96); + setState(125); fieldQueryValue(); - setState(97); + setState(126); match(RIGHT_PARENTHESIS); } break; @@ -950,9 +1235,9 @@ public T accept(ParseTreeVisitor visitor) { public final FieldLessQueryContext fieldLessQuery() throws RecognitionException { FieldLessQueryContext _localctx = new FieldLessQueryContext(_ctx, getState()); - enterRule(_localctx, 22, RULE_fieldLessQuery); + enterRule(_localctx, 28, RULE_fieldLessQuery); try { - setState(106); + setState(135); _errHandler.sync(this); switch (_input.LA(1)) { case AND: @@ -963,18 +1248,18 @@ public final FieldLessQueryContext fieldLessQuery() throws RecognitionException case WILDCARD: enterOuterAlt(_localctx, 1); { - setState(101); + setState(130); fieldQueryValue(); } break; case LEFT_PARENTHESIS: enterOuterAlt(_localctx, 2); { - setState(102); + setState(131); match(LEFT_PARENTHESIS); - setState(103); + setState(132); fieldQueryValue(); - setState(104); + setState(133); match(RIGHT_PARENTHESIS); } break; @@ -1037,22 +1322,22 @@ public T accept(ParseTreeVisitor visitor) { public final FieldQueryValueContext fieldQueryValue() throws RecognitionException { FieldQueryValueContext _localctx = new FieldQueryValueContext(_ctx, getState()); - enterRule(_localctx, 24, RULE_fieldQueryValue); + enterRule(_localctx, 30, RULE_fieldQueryValue); int _la; try { int _alt; - setState(128); + setState(157); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,15,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(109); + setState(138); _errHandler.sync(this); _la = _input.LA(1); if ((((_la) & 
~0x3f) == 0 && ((1L << _la) & 28L) != 0)) { { - setState(108); + setState(137); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { _errHandler.recoverInline(this); @@ -1065,7 +1350,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio } } - setState(112); + setState(141); _errHandler.sync(this); _alt = 1; do { @@ -1073,7 +1358,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 1: { { - setState(111); + setState(140); _la = _input.LA(1); if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { _errHandler.recoverInline(this); @@ -1089,16 +1374,16 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio default: throw new NoViableAltException(this); } - setState(114); + setState(143); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,9,_ctx); + _alt = getInterpreter().adaptivePredict(_input,11,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); - setState(117); + setState(146); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,10,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { case 1: { - setState(116); + setState(145); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { _errHandler.recoverInline(this); @@ -1116,7 +1401,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 2: enterOuterAlt(_localctx, 2); { - setState(119); + setState(148); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { _errHandler.recoverInline(this); @@ -1126,12 +1411,12 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio _errHandler.reportMatch(this); consume(); } - setState(121); + setState(150); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { case 1: { - setState(120); + setState(149); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { _errHandler.recoverInline(this); @@ -1149,14 +1434,14 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 3: enterOuterAlt(_localctx, 3); { - setState(123); + setState(152); match(NOT); - setState(125); + setState(154); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { case 1: { - setState(124); + setState(153); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { _errHandler.recoverInline(this); @@ -1174,7 +1459,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 4: enterOuterAlt(_localctx, 4); { - setState(127); + setState(156); match(QUOTED_STRING); } break; @@ -1218,29 +1503,29 @@ public T accept(ParseTreeVisitor visitor) { public final FieldNameContext fieldName() throws RecognitionException { FieldNameContext _localctx = new FieldNameContext(_ctx, getState()); - enterRule(_localctx, 26, RULE_fieldName); + enterRule(_localctx, 32, RULE_fieldName); try { - setState(133); + setState(162); _errHandler.sync(this); switch (_input.LA(1)) { case UNQUOTED_LITERAL: enterOuterAlt(_localctx, 1); { - setState(130); + setState(159); ((FieldNameContext)_localctx).value = match(UNQUOTED_LITERAL); } break; case QUOTED_STRING: enterOuterAlt(_localctx, 2); { - setState(131); + setState(160); ((FieldNameContext)_localctx).value = match(QUOTED_STRING); } break; case 
WILDCARD: enterOuterAlt(_localctx, 3); { - setState(132); + setState(161); ((FieldNameContext)_localctx).value = match(WILDCARD); } break; @@ -1263,6 +1548,8 @@ public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { switch (ruleIndex) { case 1: return query_sempred((QueryContext)_localctx, predIndex); + case 5: + return nestedSubQuery_sempred((NestedSubQueryContext)_localctx, predIndex); } return true; } @@ -1273,87 +1560,117 @@ private boolean query_sempred(QueryContext _localctx, int predIndex) { } return true; } + private boolean nestedSubQuery_sempred(NestedSubQueryContext _localctx, int predIndex) { + switch (predIndex) { + case 1: + return precpred(_ctx, 2); + } + return true; + } public static final String _serializedATN = - "\u0004\u0001\u0010\u0088\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ + "\u0004\u0001\u0010\u00a5\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ "\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004"+ "\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007"+ "\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b"+ - "\u0002\f\u0007\f\u0002\r\u0007\r\u0001\u0000\u0003\u0000\u001e\b\u0000"+ - "\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0001\u0001\u0005\u0001(\b\u0001\n\u0001\f\u0001+\t\u0001"+ - "\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002"+ - "\u0001\u0002\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001\u0003"+ - "\u0001\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004"+ - "\u0001\u0004\u0001\u0005\u0001\u0005\u0003\u0005B\b\u0005\u0001\u0005"+ - "\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0007"+ - "\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b\u0004\bO\b\b\u000b\b\f\b"+ - "P\u0001\b\u0003\bT\b\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001\n"+ - "\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003"+ - "\nd\b\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0003"+ - "\u000bk\b\u000b\u0001\f\u0003\fn\b\f\u0001\f\u0004\fq\b\f\u000b\f\f\f"+ - "r\u0001\f\u0003\fv\b\f\u0001\f\u0001\f\u0003\fz\b\f\u0001\f\u0001\f\u0003"+ - "\f~\b\f\u0001\f\u0003\f\u0081\b\f\u0001\r\u0001\r\u0001\r\u0003\r\u0086"+ - "\b\r\u0001\r\u0000\u0001\u0002\u000e\u0000\u0002\u0004\u0006\b\n\f\u000e"+ - "\u0010\u0012\u0014\u0016\u0018\u001a\u0000\u0004\u0001\u0000\u0002\u0003"+ - "\u0001\u0000\u0006\t\u0002\u0000\u000e\u000e\u0010\u0010\u0001\u0000\u0002"+ - "\u0004\u0091\u0000\u001d\u0001\u0000\u0000\u0000\u0002!\u0001\u0000\u0000"+ - "\u0000\u00044\u0001\u0000\u0000\u0000\u00066\u0001\u0000\u0000\u0000\b"+ - "9\u0001\u0000\u0000\u0000\nA\u0001\u0000\u0000\u0000\fE\u0001\u0000\u0000"+ - "\u0000\u000eI\u0001\u0000\u0000\u0000\u0010S\u0001\u0000\u0000\u0000\u0012"+ - "U\u0001\u0000\u0000\u0000\u0014c\u0001\u0000\u0000\u0000\u0016j\u0001"+ - "\u0000\u0000\u0000\u0018\u0080\u0001\u0000\u0000\u0000\u001a\u0085\u0001"+ - "\u0000\u0000\u0000\u001c\u001e\u0003\u0002\u0001\u0000\u001d\u001c\u0001"+ - "\u0000\u0000\u0000\u001d\u001e\u0001\u0000\u0000\u0000\u001e\u001f\u0001"+ - "\u0000\u0000\u0000\u001f \u0005\u0000\u0000\u0001 \u0001\u0001\u0000\u0000"+ - "\u0000!\"\u0006\u0001\uffff\uffff\u0000\"#\u0003\u0004\u0002\u0000#)\u0001"+ - "\u0000\u0000\u0000$%\n\u0002\u0000\u0000%&\u0007\u0000\u0000\u0000&(\u0003"+ - "\u0002\u0001\u0002\'$\u0001\u0000\u0000\u0000(+\u0001\u0000\u0000\u0000"+ - ")\'\u0001\u0000\u0000\u0000)*\u0001\u0000\u0000\u0000*\u0003\u0001\u0000"+ - 
"\u0000\u0000+)\u0001\u0000\u0000\u0000,5\u0003\u0006\u0003\u0000-5\u0003"+ - "\b\u0004\u0000.5\u0003\f\u0006\u0000/5\u0003\n\u0005\u000005\u0003\u0012"+ - "\t\u000015\u0003\u000e\u0007\u000025\u0003\u0014\n\u000035\u0003\u0016"+ - "\u000b\u00004,\u0001\u0000\u0000\u00004-\u0001\u0000\u0000\u00004.\u0001"+ - "\u0000\u0000\u00004/\u0001\u0000\u0000\u000040\u0001\u0000\u0000\u0000"+ - "41\u0001\u0000\u0000\u000042\u0001\u0000\u0000\u000043\u0001\u0000\u0000"+ - "\u00005\u0005\u0001\u0000\u0000\u000067\u0005\u0004\u0000\u000078\u0003"+ - "\u0004\u0002\u00008\u0007\u0001\u0000\u0000\u00009:\u0003\u001a\r\u0000"+ - ":;\u0005\u0005\u0000\u0000;<\u0005\f\u0000\u0000<=\u0003\u0002\u0001\u0000"+ - "=>\u0005\r\u0000\u0000>\t\u0001\u0000\u0000\u0000?@\u0005\u0010\u0000"+ - "\u0000@B\u0005\u0005\u0000\u0000A?\u0001\u0000\u0000\u0000AB\u0001\u0000"+ - "\u0000\u0000BC\u0001\u0000\u0000\u0000CD\u0005\u0010\u0000\u0000D\u000b"+ - "\u0001\u0000\u0000\u0000EF\u0005\n\u0000\u0000FG\u0003\u0002\u0001\u0000"+ - "GH\u0005\u000b\u0000\u0000H\r\u0001\u0000\u0000\u0000IJ\u0003\u001a\r"+ - "\u0000JK\u0007\u0001\u0000\u0000KL\u0003\u0010\b\u0000L\u000f\u0001\u0000"+ - "\u0000\u0000MO\u0007\u0002\u0000\u0000NM\u0001\u0000\u0000\u0000OP\u0001"+ - "\u0000\u0000\u0000PN\u0001\u0000\u0000\u0000PQ\u0001\u0000\u0000\u0000"+ - "QT\u0001\u0000\u0000\u0000RT\u0005\u000f\u0000\u0000SN\u0001\u0000\u0000"+ - "\u0000SR\u0001\u0000\u0000\u0000T\u0011\u0001\u0000\u0000\u0000UV\u0003"+ - "\u001a\r\u0000VW\u0005\u0005\u0000\u0000WX\u0005\u0010\u0000\u0000X\u0013"+ - "\u0001\u0000\u0000\u0000YZ\u0003\u001a\r\u0000Z[\u0005\u0005\u0000\u0000"+ - "[\\\u0003\u0018\f\u0000\\d\u0001\u0000\u0000\u0000]^\u0003\u001a\r\u0000"+ - "^_\u0005\u0005\u0000\u0000_`\u0005\n\u0000\u0000`a\u0003\u0018\f\u0000"+ - "ab\u0005\u000b\u0000\u0000bd\u0001\u0000\u0000\u0000cY\u0001\u0000\u0000"+ - "\u0000c]\u0001\u0000\u0000\u0000d\u0015\u0001\u0000\u0000\u0000ek\u0003"+ - "\u0018\f\u0000fg\u0005\n\u0000\u0000gh\u0003\u0018\f\u0000hi\u0005\u000b"+ - "\u0000\u0000ik\u0001\u0000\u0000\u0000je\u0001\u0000\u0000\u0000jf\u0001"+ - "\u0000\u0000\u0000k\u0017\u0001\u0000\u0000\u0000ln\u0007\u0003\u0000"+ - "\u0000ml\u0001\u0000\u0000\u0000mn\u0001\u0000\u0000\u0000np\u0001\u0000"+ - "\u0000\u0000oq\u0007\u0002\u0000\u0000po\u0001\u0000\u0000\u0000qr\u0001"+ - "\u0000\u0000\u0000rp\u0001\u0000\u0000\u0000rs\u0001\u0000\u0000\u0000"+ - "su\u0001\u0000\u0000\u0000tv\u0007\u0003\u0000\u0000ut\u0001\u0000\u0000"+ - "\u0000uv\u0001\u0000\u0000\u0000v\u0081\u0001\u0000\u0000\u0000wy\u0007"+ - "\u0000\u0000\u0000xz\u0007\u0003\u0000\u0000yx\u0001\u0000\u0000\u0000"+ - "yz\u0001\u0000\u0000\u0000z\u0081\u0001\u0000\u0000\u0000{}\u0005\u0004"+ - "\u0000\u0000|~\u0007\u0000\u0000\u0000}|\u0001\u0000\u0000\u0000}~\u0001"+ - "\u0000\u0000\u0000~\u0081\u0001\u0000\u0000\u0000\u007f\u0081\u0005\u000f"+ - "\u0000\u0000\u0080m\u0001\u0000\u0000\u0000\u0080w\u0001\u0000\u0000\u0000"+ - "\u0080{\u0001\u0000\u0000\u0000\u0080\u007f\u0001\u0000\u0000\u0000\u0081"+ - "\u0019\u0001\u0000\u0000\u0000\u0082\u0086\u0005\u000e\u0000\u0000\u0083"+ - "\u0086\u0005\u000f\u0000\u0000\u0084\u0086\u0005\u0010\u0000\u0000\u0085"+ - "\u0082\u0001\u0000\u0000\u0000\u0085\u0083\u0001\u0000\u0000\u0000\u0085"+ - "\u0084\u0001\u0000\u0000\u0000\u0086\u001b\u0001\u0000\u0000\u0000\u000f"+ - "\u001d)4APScjmruy}\u0080\u0085"; + "\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0002\u000f\u0007"+ + "\u000f\u0002\u0010\u0007\u0010\u0001\u0000\u0003\u0000$\b\u0000\u0001"+ + 
"\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ + "\u0001\u0001\u0001\u0005\u0001.\b\u0001\n\u0001\f\u00011\t\u0001\u0001"+ + "\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001"+ + "\u0002\u0001\u0002\u0003\u0002;\b\u0002\u0001\u0003\u0001\u0003\u0001"+ + "\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001"+ + "\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ + "\u0005\u0005\u0005L\b\u0005\n\u0005\f\u0005O\t\u0005\u0001\u0006\u0001"+ + "\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0003\u0006W\b"+ + "\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001\b"+ + "\u0003\b_\b\b\u0001\b\u0001\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n"+ + "\u0001\n\u0001\n\u0001\n\u0001\u000b\u0004\u000bl\b\u000b\u000b\u000b"+ + "\f\u000bm\u0001\u000b\u0003\u000bq\b\u000b\u0001\f\u0001\f\u0001\f\u0001"+ + "\f\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001"+ + "\r\u0001\r\u0003\r\u0081\b\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001"+ + "\u000e\u0001\u000e\u0003\u000e\u0088\b\u000e\u0001\u000f\u0003\u000f\u008b"+ + "\b\u000f\u0001\u000f\u0004\u000f\u008e\b\u000f\u000b\u000f\f\u000f\u008f"+ + "\u0001\u000f\u0003\u000f\u0093\b\u000f\u0001\u000f\u0001\u000f\u0003\u000f"+ + "\u0097\b\u000f\u0001\u000f\u0001\u000f\u0003\u000f\u009b\b\u000f\u0001"+ + "\u000f\u0003\u000f\u009e\b\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0003"+ + "\u0010\u00a3\b\u0010\u0001\u0010\u0000\u0002\u0002\n\u0011\u0000\u0002"+ + "\u0004\u0006\b\n\f\u000e\u0010\u0012\u0014\u0016\u0018\u001a\u001c\u001e"+ + " \u0000\u0004\u0001\u0000\u0002\u0003\u0001\u0000\u0006\t\u0002\u0000"+ + "\u000e\u000e\u0010\u0010\u0001\u0000\u0002\u0004\u00b1\u0000#\u0001\u0000"+ + "\u0000\u0000\u0002\'\u0001\u0000\u0000\u0000\u0004:\u0001\u0000\u0000"+ + "\u0000\u0006<\u0001\u0000\u0000\u0000\b?\u0001\u0000\u0000\u0000\nE\u0001"+ + "\u0000\u0000\u0000\fV\u0001\u0000\u0000\u0000\u000eX\u0001\u0000\u0000"+ + "\u0000\u0010^\u0001\u0000\u0000\u0000\u0012b\u0001\u0000\u0000\u0000\u0014"+ + "f\u0001\u0000\u0000\u0000\u0016p\u0001\u0000\u0000\u0000\u0018r\u0001"+ + "\u0000\u0000\u0000\u001a\u0080\u0001\u0000\u0000\u0000\u001c\u0087\u0001"+ + "\u0000\u0000\u0000\u001e\u009d\u0001\u0000\u0000\u0000 \u00a2\u0001\u0000"+ + "\u0000\u0000\"$\u0003\u0002\u0001\u0000#\"\u0001\u0000\u0000\u0000#$\u0001"+ + "\u0000\u0000\u0000$%\u0001\u0000\u0000\u0000%&\u0005\u0000\u0000\u0001"+ + "&\u0001\u0001\u0000\u0000\u0000\'(\u0006\u0001\uffff\uffff\u0000()\u0003"+ + "\u0004\u0002\u0000)/\u0001\u0000\u0000\u0000*+\n\u0002\u0000\u0000+,\u0007"+ + "\u0000\u0000\u0000,.\u0003\u0002\u0001\u0002-*\u0001\u0000\u0000\u0000"+ + ".1\u0001\u0000\u0000\u0000/-\u0001\u0000\u0000\u0000/0\u0001\u0000\u0000"+ + "\u00000\u0003\u0001\u0000\u0000\u00001/\u0001\u0000\u0000\u00002;\u0003"+ + "\u0006\u0003\u00003;\u0003\b\u0004\u00004;\u0003\u0012\t\u00005;\u0003"+ + "\u0010\b\u00006;\u0003\u0018\f\u00007;\u0003\u0014\n\u00008;\u0003\u001a"+ + "\r\u00009;\u0003\u001c\u000e\u0000:2\u0001\u0000\u0000\u0000:3\u0001\u0000"+ + "\u0000\u0000:4\u0001\u0000\u0000\u0000:5\u0001\u0000\u0000\u0000:6\u0001"+ + "\u0000\u0000\u0000:7\u0001\u0000\u0000\u0000:8\u0001\u0000\u0000\u0000"+ + ":9\u0001\u0000\u0000\u0000;\u0005\u0001\u0000\u0000\u0000<=\u0005\u0004"+ + "\u0000\u0000=>\u0003\u0004\u0002\u0000>\u0007\u0001\u0000\u0000\u0000"+ + "?@\u0003 \u0010\u0000@A\u0005\u0005\u0000\u0000AB\u0005\f\u0000\u0000"+ + "BC\u0003\n\u0005\u0000CD\u0005\r\u0000\u0000D\t\u0001\u0000\u0000\u0000"+ + 
"EF\u0006\u0005\uffff\uffff\u0000FG\u0003\f\u0006\u0000GM\u0001\u0000\u0000"+ + "\u0000HI\n\u0002\u0000\u0000IJ\u0007\u0000\u0000\u0000JL\u0003\n\u0005"+ + "\u0002KH\u0001\u0000\u0000\u0000LO\u0001\u0000\u0000\u0000MK\u0001\u0000"+ + "\u0000\u0000MN\u0001\u0000\u0000\u0000N\u000b\u0001\u0000\u0000\u0000"+ + "OM\u0001\u0000\u0000\u0000PW\u0003\u0006\u0003\u0000QW\u0003\b\u0004\u0000"+ + "RW\u0003\u000e\u0007\u0000SW\u0003\u0018\f\u0000TW\u0003\u0014\n\u0000"+ + "UW\u0003\u001a\r\u0000VP\u0001\u0000\u0000\u0000VQ\u0001\u0000\u0000\u0000"+ + "VR\u0001\u0000\u0000\u0000VS\u0001\u0000\u0000\u0000VT\u0001\u0000\u0000"+ + "\u0000VU\u0001\u0000\u0000\u0000W\r\u0001\u0000\u0000\u0000XY\u0005\n"+ + "\u0000\u0000YZ\u0003\n\u0005\u0000Z[\u0005\u000b\u0000\u0000[\u000f\u0001"+ + "\u0000\u0000\u0000\\]\u0005\u0010\u0000\u0000]_\u0005\u0005\u0000\u0000"+ + "^\\\u0001\u0000\u0000\u0000^_\u0001\u0000\u0000\u0000_`\u0001\u0000\u0000"+ + "\u0000`a\u0005\u0010\u0000\u0000a\u0011\u0001\u0000\u0000\u0000bc\u0005"+ + "\n\u0000\u0000cd\u0003\u0002\u0001\u0000de\u0005\u000b\u0000\u0000e\u0013"+ + "\u0001\u0000\u0000\u0000fg\u0003 \u0010\u0000gh\u0007\u0001\u0000\u0000"+ + "hi\u0003\u0016\u000b\u0000i\u0015\u0001\u0000\u0000\u0000jl\u0007\u0002"+ + "\u0000\u0000kj\u0001\u0000\u0000\u0000lm\u0001\u0000\u0000\u0000mk\u0001"+ + "\u0000\u0000\u0000mn\u0001\u0000\u0000\u0000nq\u0001\u0000\u0000\u0000"+ + "oq\u0005\u000f\u0000\u0000pk\u0001\u0000\u0000\u0000po\u0001\u0000\u0000"+ + "\u0000q\u0017\u0001\u0000\u0000\u0000rs\u0003 \u0010\u0000st\u0005\u0005"+ + "\u0000\u0000tu\u0005\u0010\u0000\u0000u\u0019\u0001\u0000\u0000\u0000"+ + "vw\u0003 \u0010\u0000wx\u0005\u0005\u0000\u0000xy\u0003\u001e\u000f\u0000"+ + "y\u0081\u0001\u0000\u0000\u0000z{\u0003 \u0010\u0000{|\u0005\u0005\u0000"+ + "\u0000|}\u0005\n\u0000\u0000}~\u0003\u001e\u000f\u0000~\u007f\u0005\u000b"+ + "\u0000\u0000\u007f\u0081\u0001\u0000\u0000\u0000\u0080v\u0001\u0000\u0000"+ + "\u0000\u0080z\u0001\u0000\u0000\u0000\u0081\u001b\u0001\u0000\u0000\u0000"+ + "\u0082\u0088\u0003\u001e\u000f\u0000\u0083\u0084\u0005\n\u0000\u0000\u0084"+ + "\u0085\u0003\u001e\u000f\u0000\u0085\u0086\u0005\u000b\u0000\u0000\u0086"+ + "\u0088\u0001\u0000\u0000\u0000\u0087\u0082\u0001\u0000\u0000\u0000\u0087"+ + "\u0083\u0001\u0000\u0000\u0000\u0088\u001d\u0001\u0000\u0000\u0000\u0089"+ + "\u008b\u0007\u0003\u0000\u0000\u008a\u0089\u0001\u0000\u0000\u0000\u008a"+ + "\u008b\u0001\u0000\u0000\u0000\u008b\u008d\u0001\u0000\u0000\u0000\u008c"+ + "\u008e\u0007\u0002\u0000\u0000\u008d\u008c\u0001\u0000\u0000\u0000\u008e"+ + "\u008f\u0001\u0000\u0000\u0000\u008f\u008d\u0001\u0000\u0000\u0000\u008f"+ + "\u0090\u0001\u0000\u0000\u0000\u0090\u0092\u0001\u0000\u0000\u0000\u0091"+ + "\u0093\u0007\u0003\u0000\u0000\u0092\u0091\u0001\u0000\u0000\u0000\u0092"+ + "\u0093\u0001\u0000\u0000\u0000\u0093\u009e\u0001\u0000\u0000\u0000\u0094"+ + "\u0096\u0007\u0000\u0000\u0000\u0095\u0097\u0007\u0003\u0000\u0000\u0096"+ + "\u0095\u0001\u0000\u0000\u0000\u0096\u0097\u0001\u0000\u0000\u0000\u0097"+ + "\u009e\u0001\u0000\u0000\u0000\u0098\u009a\u0005\u0004\u0000\u0000\u0099"+ + "\u009b\u0007\u0000\u0000\u0000\u009a\u0099\u0001\u0000\u0000\u0000\u009a"+ + "\u009b\u0001\u0000\u0000\u0000\u009b\u009e\u0001\u0000\u0000\u0000\u009c"+ + "\u009e\u0005\u000f\u0000\u0000\u009d\u008a\u0001\u0000\u0000\u0000\u009d"+ + "\u0094\u0001\u0000\u0000\u0000\u009d\u0098\u0001\u0000\u0000\u0000\u009d"+ + "\u009c\u0001\u0000\u0000\u0000\u009e\u001f\u0001\u0000\u0000\u0000\u009f"+ + 
"\u00a3\u0005\u000e\u0000\u0000\u00a0\u00a3\u0005\u000f\u0000\u0000\u00a1"+ + "\u00a3\u0005\u0010\u0000\u0000\u00a2\u009f\u0001\u0000\u0000\u0000\u00a2"+ + "\u00a0\u0001\u0000\u0000\u0000\u00a2\u00a1\u0001\u0000\u0000\u0000\u00a3"+ + "!\u0001\u0000\u0000\u0000\u0011#/:MV^mp\u0080\u0087\u008a\u008f\u0092"+ + "\u0096\u009a\u009d\u00a2"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java index 18ef8f389195b..8200bfe0da25d 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java @@ -56,6 +56,32 @@ interface KqlBaseVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitNestedQuery(KqlBaseParser.NestedQueryContext ctx); + /** + * Visit a parse tree produced by the {@code booleanNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx); + /** + * Visit a parse tree produced by the {@code defaultNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx); + /** + * Visit a parse tree produced by {@link KqlBaseParser#nestedSimpleSubQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx); + /** + * Visit a parse tree produced by {@link KqlBaseParser#nestedParenthesizedQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx); /** * Visit a parse tree produced by {@link KqlBaseParser#matchAllQuery}. 
* @param ctx the parse tree diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParsingContext.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParsingContext.java index 5f88080fb3ed4..30740833ee40e 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParsingContext.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParsingContext.java @@ -11,11 +11,18 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NestedLookup; +import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.support.NestedScope; import java.time.ZoneId; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.function.Supplier; + +import static org.elasticsearch.common.Strings.format; public class KqlParsingContext { @@ -32,10 +39,11 @@ public static Builder builder(QueryRewriteContext queryRewriteContext) { return new Builder(queryRewriteContext); } - private QueryRewriteContext queryRewriteContext; + private final QueryRewriteContext queryRewriteContext; private final boolean caseInsensitive; private final ZoneId timeZone; private final String defaultField; + private final NestedScope nestedScope = new NestedScope(); public KqlParsingContext(QueryRewriteContext queryRewriteContext, boolean caseInsensitive, ZoneId timeZone, String defaultField) { this.queryRewriteContext = queryRewriteContext; @@ -56,9 +64,17 @@ public String defaultField() { return defaultField; } + public String nestedPath(String fieldName) { + return nestedLookup().getNestedParent(fieldName); + } + + public boolean isNestedField(String fieldName) { + return nestedMappers().containsKey(fullFieldName(fieldName)); + } + public Set<String> resolveFieldNames(String fieldNamePattern) { assert fieldNamePattern != null && fieldNamePattern.isEmpty() == false : "fieldNamePattern cannot be null or empty"; - return queryRewriteContext.getMatchingFieldNames(fieldNamePattern); + return queryRewriteContext.getMatchingFieldNames(fullFieldName(fieldNamePattern)); } public Set<String> resolveDefaultFieldNames() { @@ -89,6 +105,38 @@ public boolean isSearchableField(String fieldName) { return isSearchableField(fieldName, fieldType(fieldName)); } + public NestedScope nestedScope() { + return nestedScope; + } + + public <T> T withNestedPath(String nestedFieldName, Supplier<T> supplier) { + assert isNestedField(nestedFieldName); + nestedScope.nextLevel(nestedMappers().get(fullFieldName(nestedFieldName))); + T result = supplier.get(); + nestedScope.previousLevel(); + return result; + } + + public String currentNestedPath() { + return nestedScope().getObjectMapper() != null ?
nestedScope().getObjectMapper().fullPath() : null; + } + + public String fullFieldName(String fieldName) { + if (nestedScope.getObjectMapper() == null) { + return fieldName; + } + + return format("%s.%s", nestedScope.getObjectMapper().fullPath(), fieldName); + } + + private NestedLookup nestedLookup() { + return queryRewriteContext.getMappingLookup().nestedLookup(); + } + + private Map<String, NestedObjectMapper> nestedMappers() { + return nestedLookup().getNestedMappers(); + } + public static class Builder { private final QueryRewriteContext queryRewriteContext; private boolean caseInsensitive = true; diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java index 588e60bd4dd75..e6e4e20cfd3ca 100644 --- a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java @@ -46,11 +46,9 @@ import static org.hamcrest.Matchers.equalTo; public abstract class AbstractKqlParserTestCase extends AbstractBuilderTestCase { - protected static final String SUPPORTED_QUERY_FILE_PATH = "/supported-queries"; protected static final String UNSUPPORTED_QUERY_FILE_PATH = "/unsupported-queries"; protected static final Predicate<String> BOOLEAN_QUERY_FILTER = (q) -> q.matches("(?i)[^{]*[^\\\\]*(NOT|AND|OR)[^}]*"); - protected static final String NESTED_FIELD_NAME = "mapped_nested"; @Override diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlNestedFieldQueryTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlNestedFieldQueryTests.java new file mode 100644 index 0000000000000..5660945fa0db3 --- /dev/null +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlNestedFieldQueryTests.java @@ -0,0 +1,297 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.kql.parser; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.NestedQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.common.Strings.format; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class KqlNestedFieldQueryTests extends AbstractKqlParserTestCase { + public void testInvalidNestedFieldName() { + for (String invalidFieldName : List.of(OBJECT_FIELD_NAME, TEXT_FIELD_NAME, "not_a_field", "mapped_nest*")) { + KqlParsingException e = assertThrows( + KqlParsingException.class, + () -> parseKqlQuery(format("%s : { %s: foo AND %s < 10 } ", invalidFieldName, TEXT_FIELD_NAME, INT_FIELD_NAME)) + ); + assertThat(e.getMessage(), Matchers.containsString(invalidFieldName)); + assertThat(e.getMessage(), Matchers.containsString("is not a valid nested field name")); + } + } + + public void testInlineNestedFieldMatchTextQuery() { + for (String fieldName : List.of(TEXT_FIELD_NAME, INT_FIELD_NAME)) { + { + // Querying a single level of nesting. + String nestedFieldName = format("%s.%s", NESTED_FIELD_NAME, fieldName); + String searchTerms = randomSearchTerms(); + String kqlQueryString = format("%s: %s", nestedFieldName, searchTerms); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + assertMatchQueryBuilder(nestedQuery.query(), nestedFieldName, searchTerms); + } + + { + // Several levels of nested fields. + String nestedFieldName = format("%s.%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME, fieldName); + String searchTerms = randomSearchTerms(); + String kqlQueryString = format("%s: %s", nestedFieldName, searchTerms); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + NestedQueryBuilder nestedSubQuery = asInstanceOf(NestedQueryBuilder.class, nestedQuery.query()); + assertThat(nestedSubQuery.path(), equalTo(format("%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME))); + + assertMatchQueryBuilder(nestedSubQuery.query(), nestedFieldName, searchTerms); + } + } + } + + public void testInlineNestedFieldMatchKeywordFieldQuery() { + { + // Querying a nested keyword subfield. + String nestedFieldName = format("%s.%s", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME); + String searchTerms = randomSearchTerms(); + String kqlQueryString = format("%s: %s", nestedFieldName, searchTerms); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + assertTermQueryBuilder(nestedQuery.query(), nestedFieldName, searchTerms); + } + + { + // Several levels of nested fields.
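+ // Each additional nesting level should wrap the query in one more NestedQueryBuilder; the assertions below unwrap it one level at a time.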
+ String nestedFieldName = format("%s.%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME, KEYWORD_FIELD_NAME); + String searchTerms = randomSearchTerms(); + String kqlQueryString = format("%s: %s", nestedFieldName, searchTerms); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + NestedQueryBuilder nestedSubQuery = asInstanceOf(NestedQueryBuilder.class, nestedQuery.query()); + assertThat(nestedSubQuery.path(), equalTo(format("%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME))); + + assertTermQueryBuilder(nestedSubQuery.query(), nestedFieldName, searchTerms); + } + } + + public void testInlineNestedFieldRangeQuery() { + { + // Querying a nested text subfield. + String nestedFieldName = format("%s.%s", NESTED_FIELD_NAME, INT_FIELD_NAME); + String operator = randomFrom(">", ">=", "<", "<="); + String kqlQueryString = format("%s %s %s", nestedFieldName, operator, randomDouble()); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + assertRangeQueryBuilder(nestedQuery.query(), nestedFieldName, rangeQueryBuilder -> {}); + } + + { + // Several levels of nested fields. + String nestedFieldName = format("%s.%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME, INT_FIELD_NAME); + String operator = randomFrom(">", ">=", "<", "<="); + String kqlQueryString = format("%s %s %s", nestedFieldName, operator, randomDouble()); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + NestedQueryBuilder nestedSubQuery = asInstanceOf(NestedQueryBuilder.class, nestedQuery.query()); + assertThat(nestedSubQuery.path(), equalTo(format("%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME))); + + assertRangeQueryBuilder(nestedSubQuery.query(), nestedFieldName, rangeQueryBuilder -> {}); + } + } + + public void testNestedQuerySyntax() { + // Single word - Keyword & text field + List.of(KEYWORD_FIELD_NAME, TEXT_FIELD_NAME) + .forEach( + fieldName -> assertThat( + parseKqlQuery(format("%s : { %s : %s }", NESTED_FIELD_NAME, fieldName, "foo")), + equalTo(parseKqlQuery(format("%s.%s : %s", NESTED_FIELD_NAME, fieldName, "foo"))) + ) + ); + + // Multiple words - Keyword & text field + List.of(KEYWORD_FIELD_NAME, TEXT_FIELD_NAME) + .forEach( + fieldName -> assertThat( + parseKqlQuery(format("%s : { %s : %s }", NESTED_FIELD_NAME, fieldName, "foo bar")), + equalTo(parseKqlQuery(format("%s.%s : %s", NESTED_FIELD_NAME, fieldName, "foo bar"))) + ) + ); + + // Range syntax + { + String operator = randomFrom("<", "<=", ">", ">="); + double rangeValue = randomDouble(); + assertThat( + parseKqlQuery(format("%s : { %s %s %s }", NESTED_FIELD_NAME, INT_FIELD_NAME, operator, rangeValue)), + equalTo(parseKqlQuery(format("%s.%s %s %s", NESTED_FIELD_NAME, INT_FIELD_NAME, operator, rangeValue))) + ); + } + + // Several level of nesting + { + QueryBuilder inlineQuery = parseKqlQuery( + format("%s.%s.%s : %s", NESTED_FIELD_NAME, NESTED_FIELD_NAME, TEXT_FIELD_NAME, "foo bar") + ); + + assertThat( + parseKqlQuery(format("%s : { %s : { %s : %s } }", NESTED_FIELD_NAME, NESTED_FIELD_NAME, TEXT_FIELD_NAME, "foo bar")), + equalTo(inlineQuery) + ); + + assertThat( + parseKqlQuery(format("%s.%s : { %s : %s }", NESTED_FIELD_NAME, NESTED_FIELD_NAME, TEXT_FIELD_NAME, "foo bar")), + equalTo(inlineQuery) + ); + + assertThat( + 
parseKqlQuery(format("%s : { %s.%s : %s }", NESTED_FIELD_NAME, NESTED_FIELD_NAME, TEXT_FIELD_NAME, "foo bar")), + equalTo(inlineQuery) + ); + } + } + + public void testBooleanAndNestedQuerySyntax() { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery( + format("%s: { %s : foo AND %s: bar AND %s > 3}", NESTED_FIELD_NAME, TEXT_FIELD_NAME, KEYWORD_FIELD_NAME, INT_FIELD_NAME) + ) + ); + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.should(), empty()); + assertThat(subQuery.filter(), empty()); + assertThat(subQuery.mustNot(), empty()); + assertThat(subQuery.must(), hasSize(3)); + assertMatchQueryBuilder( + subQuery.must().stream().filter(q -> q instanceof MatchQueryBuilder).findFirst().get(), + format("%s.%s", NESTED_FIELD_NAME, TEXT_FIELD_NAME), + "foo" + ); + assertTermQueryBuilder( + subQuery.must().stream().filter(q -> q instanceof TermQueryBuilder).findFirst().get(), + format("%s.%s", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME), + "bar" + ); + assertRangeQueryBuilder( + subQuery.must().stream().filter(q -> q instanceof RangeQueryBuilder).findAny().get(), + format("%s.%s", NESTED_FIELD_NAME, INT_FIELD_NAME), + q -> {} + ); + } + + public void testBooleanOrNestedQuerySyntax() { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery( + format("%s: { %s : foo OR %s: bar OR %s > 3 }", NESTED_FIELD_NAME, TEXT_FIELD_NAME, KEYWORD_FIELD_NAME, INT_FIELD_NAME) + ) + ); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.must(), empty()); + assertThat(subQuery.filter(), empty()); + assertThat(subQuery.mustNot(), empty()); + assertThat(subQuery.should(), hasSize(3)); + assertMatchQueryBuilder( + subQuery.should().stream().filter(q -> q instanceof MatchQueryBuilder).findFirst().get(), + format("%s.%s", NESTED_FIELD_NAME, TEXT_FIELD_NAME), + "foo" + ); + assertTermQueryBuilder( + subQuery.should().stream().filter(q -> q instanceof TermQueryBuilder).findFirst().get(), + format("%s.%s", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME), + "bar" + ); + assertRangeQueryBuilder( + subQuery.should().stream().filter(q -> q instanceof RangeQueryBuilder).findAny().get(), + format("%s.%s", NESTED_FIELD_NAME, INT_FIELD_NAME), + q -> {} + ); + } + + public void testBooleanNotNestedQuerySyntax() { + { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery(format("%s: { NOT %s : foo }", NESTED_FIELD_NAME, TEXT_FIELD_NAME)) + ); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.must(), empty()); + assertThat(subQuery.filter(), empty()); + assertThat(subQuery.should(), empty()); + assertThat(subQuery.mustNot(), hasSize(1)); + assertMatchQueryBuilder(subQuery.mustNot().get(0), format("%s.%s", NESTED_FIELD_NAME, TEXT_FIELD_NAME), "foo"); + } + + { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery(format("%s: { NOT %s : foo }", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME)) + ); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.must(), empty()); + assertThat(subQuery.filter(), empty()); + 
assertThat(subQuery.should(), empty()); + assertThat(subQuery.mustNot(), hasSize(1)); + assertTermQueryBuilder(subQuery.mustNot().get(0), format("%s.%s", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME), "foo"); + } + + { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery(format("%s: { NOT %s < 3 }", NESTED_FIELD_NAME, INT_FIELD_NAME)) + ); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.must(), empty()); + assertThat(subQuery.filter(), empty()); + assertThat(subQuery.should(), empty()); + assertThat(subQuery.mustNot(), hasSize(1)); + assertRangeQueryBuilder(subQuery.mustNot().get(0), format("%s.%s", NESTED_FIELD_NAME, INT_FIELD_NAME), q -> {}); + } + } + + private static String randomSearchTerms() { + return Stream.generate(ESTestCase::randomIdentifier).limit(randomIntBetween(1, 10)).collect(Collectors.joining(" ")); + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java index 45dd3312bbc03..6415cdb94ada7 100644 --- a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java @@ -10,7 +10,10 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.ExistsQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.NestedQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; + +import java.util.regex.Pattern; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; @@ -35,11 +38,18 @@ public void testParseExistsQueryWithNoMatchingFields() { public void testParseExistsQueryWithASingleField() { for (String fieldName : searchableFields()) { - ExistsQueryBuilder parsedQuery = asInstanceOf(ExistsQueryBuilder.class, parseKqlQuery(kqlExistsQuery(fieldName))); - assertThat(parsedQuery.fieldName(), equalTo(fieldName)); + QueryBuilder parsedQuery = parseKqlQuery(kqlExistsQuery(fieldName)); // Using quotes to wrap the field name does not change the result. 
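+ // For fields under a nested mapper, the parsed exists query is wrapped in one NestedQueryBuilder per nesting level; the loop below unwraps these wrappers before the exists assertion.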
assertThat(parseKqlQuery(kqlExistsQuery("\"" + fieldName + "\"")), equalTo(parsedQuery)); + + long nestingLevel = Pattern.compile("[.]").splitAsStream(fieldName).takeWhile(s -> s.equals(NESTED_FIELD_NAME)).count(); + for (int i = 0; i < nestingLevel; i++) { + parsedQuery = asInstanceOf(NestedQueryBuilder.class, parsedQuery).query(); + } + + ExistsQueryBuilder existsQuery = asInstanceOf(ExistsQueryBuilder.class, parsedQuery); + assertThat(existsQuery.fieldName(), equalTo(fieldName)); } } @@ -53,7 +63,9 @@ public void testParseExistsQueryUsingWildcardFieldName() { assertThat( parsedQuery.should(), - containsInAnyOrder(searchableFields(fieldNamePattern).stream().map(QueryBuilders::existsQuery).toArray()) + containsInAnyOrder( + searchableFields(fieldNamePattern).stream().map(fieldName -> parseKqlQuery(kqlExistsQuery(fieldName))).toArray() + ) ); } diff --git a/x-pack/plugin/kql/src/test/resources/supported-queries b/x-pack/plugin/kql/src/test/resources/supported-queries index b659b1ae5b1db..f54a1d32fe3be 100644 --- a/x-pack/plugin/kql/src/test/resources/supported-queries +++ b/x-pack/plugin/kql/src/test/resources/supported-queries @@ -91,13 +91,6 @@ mapped_nested: { NOT(mapped_string:foo AND mapped_string_2:foo bar) } mapped_nested: { NOT mapped_string:foo AND NOT mapped_string_2:foo bar } mapped_nested: { (NOT mapped_string:foo) AND (NOT mapped_string_2:foo bar) } mapped_nested: { NOT(mapped_string:foo) AND NOT(mapped_string_2:foo bar) } -mapped_nested: { mapped_string:foo AND mapped_string_2:foo bar AND foo bar } -mapped_nested: { mapped_string:foo AND mapped_string_2:foo bar OR foo bar } -mapped_nested: { mapped_string:foo OR mapped_string_2:foo bar OR foo bar } -mapped_nested: { mapped_string:foo OR mapped_string_2:foo bar AND foo bar } -mapped_nested: { mapped_string:foo AND (mapped_string_2:foo bar OR foo bar) } -mapped_nested: { mapped_string:foo AND (mapped_string_2:foo bar OR foo bar) } -mapped_nested: { mapped_string:foo OR (mapped_string_2:foo bar OR foo bar) } mapped_nested: { mapped_str*:foo } mapped_nested: { mapped_nested : { mapped_string:foo AND mapped_int < 3 } AND mapped_string_2:foo bar } mapped_nested: { mapped_nested.mapped_string:foo AND mapped_string_2:foo bar } diff --git a/x-pack/plugin/kql/src/test/resources/unsupported-queries b/x-pack/plugin/kql/src/test/resources/unsupported-queries index 149bcf5bd2b5a..526ae94d6ac88 100644 --- a/x-pack/plugin/kql/src/test/resources/unsupported-queries +++ b/x-pack/plugin/kql/src/test/resources/unsupported-queries @@ -25,6 +25,20 @@ mapped_string:(foo (bar)) // Bad syntax for nested fields: mapped_nested { mapped_string: bar } +// Unknown nested field or not a nested field +not_nested : { mapped_string: bar } +mapped_string: { mapped_string: bar } + +// Nested query can not use fieldless subqueries +mapped_nested: { foo } +mapped_nested: { mapped_string:foo AND mapped_string_2:foo bar AND foo bar } +mapped_nested: { mapped_string:foo AND mapped_string_2:foo bar OR foo bar } +mapped_nested: { mapped_string:foo OR mapped_string_2:foo bar OR foo bar } +mapped_nested: { mapped_string:foo OR mapped_string_2:foo bar AND foo bar } +mapped_nested: { mapped_string:foo AND (mapped_string_2:foo bar OR foo bar) } +mapped_nested: { mapped_string:foo AND (mapped_string_2:foo bar OR foo bar) } +mapped_nested: { mapped_string:foo OR (mapped_string_2:foo bar OR foo bar) } + // Missing escape sequences: mapped_string: foo:bar mapped_string: (foo and bar) diff --git 
a/x-pack/plugin/kql/src/yamlRestTest/resources/rest-api-spec/test/kql/50_kql_nested_fields_query.yml b/x-pack/plugin/kql/src/yamlRestTest/resources/rest-api-spec/test/kql/50_kql_nested_fields_query.yml new file mode 100644 index 0000000000000..4ce6688e5222d --- /dev/null +++ b/x-pack/plugin/kql/src/yamlRestTest/resources/rest-api-spec/test/kql/50_kql_nested_fields_query.yml @@ -0,0 +1,218 @@ +setup: + - requires: + capabilities: + - method: POST + path: /_search + capabilities: [ kql_query ] + test_runner_features: [ capabilities, contains ] + reason: KQL query is not available + + - requires: + "test_runner_features": "contains" + + - do: + indices.create: + index: test-index + body: + mappings: + properties: + department: + type: keyword + staff: + type: integer + courses: + type: nested + properties: + name: + type: text + credits: + type: integer + sessions: + type: nested + properties: + semester: + type: keyword + students: + type: integer + + - do: + bulk: + index: test-index + refresh: true + body: | + { "index" : { "_id": "doc-1" } } + { "department": "compsci", "staff": 12, "courses": [ { "name": "Object Oriented Programming", "credits": 3, "sessions": [ { "semester": "spr2021", "students": 37 }, { "semester": "fall2020", "students": 45} ] }, { "name": "Theory of Computation", "credits": 4, "sessions": [ { "semester": "spr2021", "students": 19 }, { "semester": "fall2020", "students": 14 } ] } ] } + { "index" : { "_id": "doc-42" } } + { "department": "math", "staff": 20, "courses": [ { "name": "Precalculus", "credits": 1, "sessions": [ { "semester": "spr2021", "students": 100 }, { "semester": "fall2020", "students": 134 } ] }, { "name": "Linear Algebra", "credits": 3, "sessions": [ { "semester": "spr2021", "students": 29 }, { "semester": "fall2020", "students": 23 } ] } ] } + +--- +"Inline syntax": + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses.name: object oriented programming" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses.name: object oriented programming AND courses.credits > 3" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses.name: object oriented programming OR courses.credits > 3" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + +--- +"Nested field syntax": + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses : { name: object oriented programming }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: object oriented programming AND credits > 3 }" + } + } + } + - match: { hits.total: 0 } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: object oriented programming AND credits >= 3 }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: object oriented 
programming OR credits > 3 }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { NOT name: object oriented programming AND credits < 4 }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-42" } + + +--- +"Several levels of nesting field syntax": + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: object oriented programming AND sessions.semester: spr2021 }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { sessions : { semester: spr2021 AND students < 20 } }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: computation AND sessions : { semester: spr2021 AND students < 20 } }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } diff --git a/x-pack/plugin/logsdb/qa/with-basic/build.gradle b/x-pack/plugin/logsdb/qa/with-basic/build.gradle index 2fdeed338e1c1..44ebd83bf4f4c 100644 --- a/x-pack/plugin/logsdb/qa/with-basic/build.gradle +++ b/x-pack/plugin/logsdb/qa/with-basic/build.gradle @@ -15,7 +15,7 @@ dependencies { tasks.named("javaRestTest").configure { // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) usesDefaultDistribution() } diff --git a/x-pack/plugin/mapper-constant-keyword/build.gradle b/x-pack/plugin/mapper-constant-keyword/build.gradle index ad9d3c2f86637..3b11d951fe37a 100644 --- a/x-pack/plugin/mapper-constant-keyword/build.gradle +++ b/x-pack/plugin/mapper-constant-keyword/build.gradle @@ -18,7 +18,7 @@ dependencies { compileOnly project(path: xpackModule('core')) } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/x-pack/plugin/mapper-unsigned-long/build.gradle b/x-pack/plugin/mapper-unsigned-long/build.gradle index e011723da6230..faad1db822560 100644 --- a/x-pack/plugin/mapper-unsigned-long/build.gradle +++ b/x-pack/plugin/mapper-unsigned-long/build.gradle @@ -37,7 +37,7 @@ restResources { } } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index b43d87c17e644..ec04bfdd058f9 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -645,7 +645,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } if (dimension && numericValue != null) { -
context.getDimensions().addUnsignedLong(fieldType().name(), numericValue).validate(context.indexSettings()); + context.getRoutingFields().addUnsignedLong(fieldType().name(), numericValue); } List fields = new ArrayList<>(); diff --git a/x-pack/plugin/mapper-version/build.gradle b/x-pack/plugin/mapper-version/build.gradle index 69622762b9d5b..fb760b3446dfd 100644 --- a/x-pack/plugin/mapper-version/build.gradle +++ b/x-pack/plugin/mapper-version/build.gradle @@ -25,7 +25,7 @@ dependencies { testImplementation project(path: xpackModule('analytics')) } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index e79a771293392..67c26c78a6741 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -94,7 +94,7 @@ dependencies { } def mlCppVersion(){ - return (project.gradle.parent != null && BuildParams.isSnapshotBuild() == false) ? + return (project.gradle.parent != null && buildParams.isSnapshotBuild() == false) ? (project.version + "-SNAPSHOT") : project.version; } diff --git a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle index 64970d18b5c82..3854c70b0f389 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle +++ b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle @@ -18,7 +18,7 @@ testClusters.configureEach { setting 'slm.history_index_enabled', 'false' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/ml/qa/disabled/build.gradle b/x-pack/plugin/ml/qa/disabled/build.gradle index 232700d5f84aa..0d1d8d6484afc 100644 --- a/x-pack/plugin/ml/qa/disabled/build.gradle +++ b/x-pack/plugin/ml/qa/disabled/build.gradle @@ -12,7 +12,7 @@ testClusters.configureEach { setting 'xpack.ml.enabled', 'false' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle index bc22552d0d734..b43132c2daf50 100644 --- a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle @@ -12,7 +12,7 @@ dependencies { testImplementation project(':x-pack:qa') } -Version ccsCompatVersion = BuildParams.bwcVersions.minimumWireCompatibleVersion +Version ccsCompatVersion = buildParams.bwcVersions.minimumWireCompatibleVersion restResources { restApi { diff --git a/x-pack/plugin/ml/qa/single-node-tests/build.gradle b/x-pack/plugin/ml/qa/single-node-tests/build.gradle index 6979ec4dcbd31..5ed1c5179716f 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/single-node-tests/build.gradle @@ -12,7 +12,7 @@ testClusters.configureEach { setting 'xpack.license.self_generated.type', 'trial' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java index bab7bb52f928f..5a06308a3c8cc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java @@ -17,7 +17,11 @@ public static Builder builder(ByteSizeValue nodeSize, ByteSizeValue tierSize) { } public static Builder from(AutoscalingCapacity autoscalingCapacity) { - return builder(autoscalingCapacity.node().memory(), autoscalingCapacity.total().memory()); + if (autoscalingCapacity == null) { + return builder(null, null); + } else { + return builder(autoscalingCapacity.node().memory(), autoscalingCapacity.total().memory()); + } } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java index dfe0e557f749d..0ff6aece95ab1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java @@ -809,7 +809,7 @@ static MlMemoryAutoscalingCapacity ensureScaleDown( MlMemoryAutoscalingCapacity scaleDownResult, MlMemoryAutoscalingCapacity currentCapacity ) { - if (scaleDownResult == null || currentCapacity == null) { + if (scaleDownResult == null || currentCapacity == null || currentCapacity.isUndetermined()) { return null; } MlMemoryAutoscalingCapacity newCapacity = MlMemoryAutoscalingCapacity.builder( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java index 56cdcc88df91b..15fb2b2b81f30 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java @@ -138,16 +138,20 @@ public void testRunJob_TaskIsStopping() { when(task.isStopping()).thenReturn(true); when(task.getParams()).thenReturn(new StartDataFrameAnalyticsAction.TaskParams("data_frame_id", MlConfigVersion.CURRENT, false)); - processManager.runJob(task, dataFrameAnalyticsConfig, dataExtractorFactory, + processManager.runJob( + task, + dataFrameAnalyticsConfig, + dataExtractorFactory, ActionTestUtils.assertNoFailureListener(stepResponse -> { - assertThat(processManager.getProcessContextCount(), equalTo(0)); - assertThat(stepResponse.isTaskComplete(), is(true)); + assertThat(processManager.getProcessContextCount(), equalTo(0)); + assertThat(stepResponse.isTaskComplete(), is(true)); - InOrder inOrder = inOrder(task); - inOrder.verify(task).isStopping(); - inOrder.verify(task).getParams(); - verifyNoMoreInteractions(task); - })); + InOrder inOrder = inOrder(task); + inOrder.verify(task).isStopping(); + inOrder.verify(task).getParams(); + verifyNoMoreInteractions(task); + }) + ); } public void testRunJob_ProcessContextAlreadyExists() { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndicesStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndicesStatsMonitoringDocTests.java index 
6822f54633bdc..b1d4f3ff7045f 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndicesStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndicesStatsMonitoringDocTests.java @@ -53,6 +53,8 @@ public void setUp() throws Exception { "dcvO5uZATE-EhIKc3tk9Bg", null, null, + null, + null, new ShardStats[] { // Primaries new ShardStats(mockShardRouting(true), mockShardPath(), mockCommonStats(), null, null, null, false, 0), diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml index f3914843b80ec..42c01f0b9636c 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml @@ -35,6 +35,16 @@ setup: properties: views: type: long + nested_inner_hits: + type: nested + properties: + data: + type: keyword + paragraph_id: + type: dense_vector + dims: 1 + index: true + similarity: l2_norm - do: index: @@ -125,6 +135,16 @@ setup: integer: 2 keyword: "technology" nested: { views: 10} + nested_inner_hits: [{"data": "foo"}, {"data": "bar"}, {"data": "baz"}] + + - do: + index: + index: test + id: "10" + body: + id: 10 + integer: 3 + nested_inner_hits: [ {"data": "foo", "paragraph_id": [1]}] - do: indices.refresh: {} @@ -960,3 +980,94 @@ setup: - length: { hits.hits : 1 } - match: { hits.hits.0._id: "1" } + +--- +"rrf retriever with inner_hits for sub-retriever": + - requires: + capabilities: + - method: POST + path: /_search + capabilities: [ nested_retriever_inner_hits_support ] + test_runner_features: capabilities + reason: "Support for propagating nested retrievers' inner hits to the top-level compound retriever is required" + + - do: + search: + _source: false + index: test + body: + retriever: + rrf: + retrievers: [ + { + # this will return doc 9 and doc 10 + standard: { + query: { + nested: { + path: nested_inner_hits, + inner_hits: { + name: nested_data_field, + _source: false, + "sort": [ { + "nested_inner_hits.data": "asc" + } + ], + fields: [ nested_inner_hits.data ] + }, + query: { + match_all: { } + } + } + } + } + }, + { + # this will return doc 10 + standard: { + query: { + nested: { + path: nested_inner_hits, + inner_hits: { + name: nested_vector_field, + _source: false, + size: 1, + "fields": [ "nested_inner_hits.paragraph_id" ] + }, + query: { + knn: { + field: nested_inner_hits.paragraph_id, + query_vector: [ 1 ], + num_candidates: 10 + } + } + } + } + } + }, + { + standard: { + query: { + match_all: { } + } + } + } + ] + rank_window_size: 10 + rank_constant: 10 + size: 3 + + - match: { hits.total.value: 10 } + + - match: { hits.hits.0.inner_hits.nested_data_field.hits.total.value: 1 } + - match: { hits.hits.0.inner_hits.nested_data_field.hits.hits.0.fields.nested_inner_hits.0.data.0: foo } + - match: { hits.hits.0.inner_hits.nested_vector_field.hits.total.value: 1 } + - match: { hits.hits.0.inner_hits.nested_vector_field.hits.hits.0.fields.nested_inner_hits.0.paragraph_id: [ 1 ] } + + - match: { hits.hits.1.inner_hits.nested_data_field.hits.total.value: 3 } + - match: { hits.hits.1.inner_hits.nested_data_field.hits.hits.0.fields.nested_inner_hits.0.data.0: bar } + - match: 
{ hits.hits.1.inner_hits.nested_data_field.hits.hits.1.fields.nested_inner_hits.0.data.0: baz } + - match: { hits.hits.1.inner_hits.nested_data_field.hits.hits.2.fields.nested_inner_hits.0.data.0: foo } + - match: { hits.hits.1.inner_hits.nested_vector_field.hits.total.value: 0 } + + - match: { hits.hits.2.inner_hits.nested_data_field.hits.total.value: 0 } + - match: { hits.hits.2.inner_hits.nested_vector_field.hits.total.value: 0 } diff --git a/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle b/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle index 01264d7849680..4683c13f1fc0c 100644 --- a/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle +++ b/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle @@ -38,7 +38,7 @@ tasks.named("javaRestTest") { systemProperty 'test.azure.container', azureContainer systemProperty 'test.azure.key', azureKey systemProperty 'test.azure.sas_token', azureSasToken - nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repositories_metering_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repositories_metering_tests_" + buildParams.testSeed } tasks.register("azureThirdPartyTest") { diff --git a/x-pack/plugin/repositories-metering-api/qa/gcs/build.gradle b/x-pack/plugin/repositories-metering-api/qa/gcs/build.gradle index b8c345c99b895..62fe47c08f5f5 100644 --- a/x-pack/plugin/repositories-metering-api/qa/gcs/build.gradle +++ b/x-pack/plugin/repositories-metering-api/qa/gcs/build.gradle @@ -35,7 +35,7 @@ if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) { tasks.named("javaRestTest").configure { systemProperty 'test.google.fixture', Boolean.toString(useFixture) systemProperty 'test.gcs.bucket', gcsBucket - nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_repositories_metering" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_repositories_metering" + buildParams.testSeed if (useFixture == false) { systemProperty 'test.google.account', serviceAccountFile } diff --git a/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle b/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle index 5f2bf66f31b21..3c58e6a06af69 100644 --- a/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle +++ b/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle @@ -38,7 +38,7 @@ tasks.named("javaRestTest").configure { systemProperty 'test.s3.bucket', s3Bucket systemProperty("s3AccessKey", s3AccessKey) systemProperty("s3SecretKey", s3SecretKey) - nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? s3BasePath + "_repositories_metering" + BuildParams.testSeed : 'base_path_integration_tests' + nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? 
s3BasePath + "_repositories_metering" + buildParams.testSeed : 'base_path_integration_tests' } tasks.register("s3ThirdPartyTest").configure { dependsOn "javaRestTest" diff --git a/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle b/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle index 6774ef920f280..e2f77fae89225 100644 --- a/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle @@ -33,7 +33,7 @@ tasks.named("javaRestTest").configure { systemProperty 'test.azure.container', azureContainer systemProperty 'test.azure.key', azureKey systemProperty 'test.azure.sas_token', azureSasToken - nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_searchable_snapshots_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_searchable_snapshots_tests_" + buildParams.testSeed } tasks.register("azureThirdPartyTest") { diff --git a/x-pack/plugin/searchable-snapshots/qa/gcs/build.gradle b/x-pack/plugin/searchable-snapshots/qa/gcs/build.gradle index 3099f0787998e..c0a420aff313a 100644 --- a/x-pack/plugin/searchable-snapshots/qa/gcs/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/gcs/build.gradle @@ -29,7 +29,7 @@ if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) { tasks.named("javaRestTest").configure { systemProperty 'test.google.fixture', Boolean.toString(useFixture) systemProperty 'test.gcs.bucket', gcsBucket - nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_searchable_snapshots_tests" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_searchable_snapshots_tests" + buildParams.testSeed if (useFixture == false) { systemProperty 'test.google.account', serviceAccountFile diff --git a/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle b/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle index 52ea873ae53bf..e8d97da9a9e37 100644 --- a/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle @@ -28,6 +28,6 @@ restResources { tasks.named("javaRestTest").configure { usesDefaultDistribution() - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } diff --git a/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle b/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle index 8919ddc6d29fd..430df2a7e8122 100644 --- a/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle @@ -40,7 +40,7 @@ tasks.named("javaRestTest").configure { systemProperty("s3AccessKey", s3AccessKey) systemProperty("s3SecretKey", s3SecretKey) - nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? s3BasePath + "_searchable_snapshots_tests" + BuildParams.testSeed : 'base_path_integration_tests' + nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? 
s3BasePath + "_searchable_snapshots_tests" + buildParams.testSeed : 'base_path_integration_tests' } tasks.register("s3ThirdPartyTest") { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java index 9dadb75e87cef..c378fef9428ba 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xpack.searchablesnapshots.BaseSearchableSnapshotsIntegTestCase; -import org.elasticsearch.xpack.searchablesnapshots.allocation.decider.SearchableSnapshotEnableAllocationDecider; import org.hamcrest.Matchers; import java.util.List; @@ -31,9 +30,7 @@ public void testAllocationDisabled() throws Exception { final String restoredIndexName = setupMountedIndex(); int numPrimaries = getNumShards(restoredIndexName).numPrimaries; setEnableAllocation(EnableAllocationDecider.Allocation.PRIMARIES); - if (randomBoolean()) { - setAllocateOnRollingRestart(false); - } + Set indexNodes = internalCluster().nodesInclude(restoredIndexName); for (String indexNode : indexNodes) { internalCluster().restartNode(indexNode); @@ -43,16 +40,13 @@ public void testAllocationDisabled() throws Exception { .actionGet(); assertThat(response.getUnassignedShards(), Matchers.equalTo(numPrimaries)); - setAllocateOnRollingRestart(true); + setEnableAllocation(null); ensureGreen(restoredIndexName); } public void testAllocateOnRollingRestartEnabled() throws Exception { final String restoredIndexName = setupMountedIndex(); - if (randomBoolean()) { - setEnableAllocation(EnableAllocationDecider.Allocation.PRIMARIES); - } - setAllocateOnRollingRestart(true); + setEnableAllocation(null); Set indexNodes = internalCluster().nodesInclude(restoredIndexName); for (String indexNode : indexNodes) { internalCluster().restartNode(indexNode); @@ -74,14 +68,7 @@ private String setupMountedIndex() throws Exception { } public void setEnableAllocation(EnableAllocationDecider.Allocation allocation) { - setSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, allocation.name()); - } - - public void setAllocateOnRollingRestart(boolean allocateOnRollingRestart) { - setSetting( - SearchableSnapshotEnableAllocationDecider.SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART, - Boolean.toString(allocateOnRollingRestart) - ); + setSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, allocation != null ? 
allocation.name() : null); } private void setSetting(Setting<?> setting, String value) { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index eabdf7c9bf46c..8bb4c45e54ab3 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -304,7 +304,6 @@ public List<Setting<?>> getSettings() { CacheService.SNAPSHOT_CACHE_SYNC_INTERVAL_SETTING, CacheService.SNAPSHOT_CACHE_MAX_FILES_TO_SYNC_AT_ONCE_SETTING, CacheService.SNAPSHOT_CACHE_SYNC_SHUTDOWN_TIMEOUT, - SearchableSnapshotEnableAllocationDecider.SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART, BlobStoreCacheMaintenanceService.SNAPSHOT_SNAPSHOT_CLEANUP_INTERVAL_SETTING, BlobStoreCacheMaintenanceService.SNAPSHOT_SNAPSHOT_CLEANUP_KEEP_ALIVE_SETTING, BlobStoreCacheMaintenanceService.SNAPSHOT_SNAPSHOT_CLEANUP_BATCH_SIZE_SETTING, diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java index 1e360fc2f3503..b6a301a01c782 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java @@ -15,50 +15,26 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.UpdateForV9; public class SearchableSnapshotEnableAllocationDecider extends AllocationDecider { static final String NAME = "searchable_snapshots_enable"; - /** - * This setting describes whether searchable snapshots are allocated during rolling restarts. For now, whether a rolling restart is - * ongoing is determined by cluster.routing.allocation.enable=primaries. Notice that other values for that setting except "all" mean - * that no searchable snapshots are allocated anyway.
- */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - // xpack.searchable.snapshot.allocate_on_rolling_restart was only temporary, remove it in the next major - public static final Setting<Boolean> SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART = Setting.boolSetting( - "xpack.searchable.snapshot.allocate_on_rolling_restart", - false, - Setting.Property.Dynamic, - Setting.Property.NodeScope, - Setting.Property.Deprecated - ); - private volatile EnableAllocationDecider.Allocation enableAllocation; - private volatile boolean allocateOnRollingRestart; public SearchableSnapshotEnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) { this.enableAllocation = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings); - this.allocateOnRollingRestart = SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART.get(settings); clusterSettings.addSettingsUpdateConsumer( EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation ); - clusterSettings.addSettingsUpdateConsumer(SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART, this::setAllocateOnRollingRestart); } private void setEnableAllocation(EnableAllocationDecider.Allocation allocation) { this.enableAllocation = allocation; } - private void setAllocateOnRollingRestart(boolean allocateOnRollingRestart) { - this.allocateOnRollingRestart = allocateOnRollingRestart; - } - @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return canAllocate(shardRouting, allocation); @@ -73,25 +49,14 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocat final IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); if (indexMetadata.isSearchableSnapshot()) { EnableAllocationDecider.Allocation enableAllocationCopy = this.enableAllocation; - boolean allocateOnRollingRestartCopy = this.allocateOnRollingRestart; if (enableAllocationCopy == EnableAllocationDecider.Allocation.PRIMARIES) { - if (allocateOnRollingRestartCopy == false) { - return allocation.decision( - Decision.NO, - NAME, - "no allocations of searchable snapshots allowed during rolling restart due to [%s=%s] and [%s=false]", - EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), - enableAllocationCopy, - SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART.getKey() - ); - } else { - return allocation.decision( - Decision.YES, - NAME, - "allocate on rolling restart enabled [%s=true]", - SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART.getKey() - ); - } + return allocation.decision( + Decision.NO, + NAME, + "no allocations of searchable snapshots allowed during rolling restart due to [%s=%s]", + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), + enableAllocationCopy + ); } else { return allocation.decision( Decision.YES, diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index dcf3c7305dbc7..8fd3dd29f87a4 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -36,7 +36,7 @@ tasks.named("test").configure { systemProperty 'tests.security.manager', 'false' // the main code under test runs without the SecurityManager } -if (BuildParams.inFipsJvm) { +if (buildParams.inFipsJvm) { tasks.named("test").configure { enabled = false } diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle
index f751fcd0a655d..f53ff7027f126 100644 --- a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle +++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle @@ -6,7 +6,7 @@ */ apply plugin: 'elasticsearch.build' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' // See the build.gradle file in the parent directory for an explanation of this unusual build diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle index c4c0f2ebd2fe1..d24299a3847da 100644 --- a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle +++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle @@ -6,7 +6,7 @@ */ apply plugin: 'elasticsearch.build' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' // See the build.gradle file in the parent directory for an explanation of this unusual build diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle index 580ca45055219..4418bd32e64cf 100644 --- a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle +++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle @@ -6,7 +6,7 @@ */ apply plugin: 'elasticsearch.build' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' // See the build.gradle file in the parent directory for an explanation of this unusual build diff --git a/x-pack/plugin/security/qa/basic-enable-security/build.gradle b/x-pack/plugin/security/qa/basic-enable-security/build.gradle index 5957216a3e12d..a6930d38d41e5 100644 --- a/x-pack/plugin/security/qa/basic-enable-security/build.gradle +++ b/x-pack/plugin/security/qa/basic-enable-security/build.gradle @@ -16,7 +16,7 @@ dependencies { tasks.named("javaRestTest").configure { // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) usesDefaultDistribution() } diff --git a/x-pack/plugin/security/qa/multi-cluster/build.gradle b/x-pack/plugin/security/qa/multi-cluster/build.gradle index b8eccb14819a4..8ee449d39dcce 100644 --- a/x-pack/plugin/security/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/security/qa/multi-cluster/build.gradle @@ -35,7 +35,7 @@ tasks.named("javaRestTest") { exclude '**/RemoteClusterSecurityBWCToRCS2ClusterRestIT.class' } -BuildParams.bwcVersions.withWireCompatible() { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible() { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1MissingIndicesIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1MissingIndicesIT.java new file mode 100644 index 0000000000000..0f39104511be0 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1MissingIndicesIT.java @@ -0,0 +1,574 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.remotecluster; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.hamcrest.Matchers; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +/** + * Tests cross-cluster ES|QL queries under RCS1.0 security model for cases where index expressions do not match + * to ensure handling of those matches the expected rules defined in EsqlSessionCrossClusterUtils. + */ +public class CrossClusterEsqlRCS1MissingIndicesIT extends AbstractRemoteClusterSecurityTestCase { + + private static final AtomicBoolean SSL_ENABLED_REF = new AtomicBoolean(); + + static { + // remote cluster + fulfillingCluster = ElasticsearchCluster.local() + .name("fulfilling-cluster") + .nodes(1) + .module("x-pack-esql") + .module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("remote_cluster.port", "0") + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_server.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key") + .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt") + .setting("xpack.security.authc.token.enabled", "true") + .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password") + .node(0, spec -> spec.setting("remote_cluster_server.enabled", "true")) + .build(); + + // "local" cluster + queryCluster = ElasticsearchCluster.local() + .name("query-cluster") + .module("x-pack-esql") + .module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_client.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .build(); + } + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster); + + private static final String INDEX1 = "points"; // on local cluster only + private static final String INDEX2 = "squares"; // on local and remote clusters + + record ExpectedCluster(String clusterAlias, String indexExpression, String status, Integer totalShards) {} + + @SuppressWarnings("unchecked") + public void assertExpectedClustersForMissingIndicesTests(Map responseMap, List expected) { + Map clusters = (Map) responseMap.get("_clusters"); + assertThat((int) responseMap.get("took"), greaterThan(0)); + + Map detailsMap = (Map) clusters.get("details"); + assertThat(detailsMap.size(), is(expected.size())); + + assertThat((int) clusters.get("total"), is(expected.size())); + assertThat((int) clusters.get("successful"), is((int) expected.stream().filter(ec -> ec.status().equals("successful")).count())); + assertThat((int) 
clusters.get("skipped"), is((int) expected.stream().filter(ec -> ec.status().equals("skipped")).count())); + assertThat((int) clusters.get("failed"), is((int) expected.stream().filter(ec -> ec.status().equals("failed")).count())); + + for (ExpectedCluster expectedCluster : expected) { + Map clusterDetails = (Map) detailsMap.get(expectedCluster.clusterAlias()); + String msg = expectedCluster.clusterAlias(); + + assertThat(msg, (int) clusterDetails.get("took"), greaterThan(0)); + assertThat(msg, clusterDetails.get("status"), is(expectedCluster.status())); + Map shards = (Map) clusterDetails.get("_shards"); + if (expectedCluster.totalShards() == null) { + assertThat(msg, (int) shards.get("total"), greaterThan(0)); + } else { + assertThat(msg, (int) shards.get("total"), is(expectedCluster.totalShards())); + } + + if (expectedCluster.status().equals("successful")) { + assertThat((int) shards.get("successful"), is((int) shards.get("total"))); + assertThat((int) shards.get("skipped"), is(0)); + + } else if (expectedCluster.status().equals("skipped")) { + assertThat((int) shards.get("successful"), is(0)); + assertThat((int) shards.get("skipped"), is((int) shards.get("total"))); + ArrayList failures = (ArrayList) clusterDetails.get("failures"); + assertThat(failures.size(), is(1)); + Map failure1 = (Map) failures.get(0); + Map innerReason = (Map) failure1.get("reason"); + String expectedMsg = "Unknown index [" + expectedCluster.indexExpression() + "]"; + assertThat(innerReason.get("reason").toString(), containsString(expectedMsg)); + assertThat(innerReason.get("type").toString(), containsString("verification_exception")); + + } else { + fail(msg + "; Unexpected status: " + expectedCluster.status()); + } + // currently failed shards is always zero - change this once we start allowing partial data for individual shard failures + assertThat((int) shards.get("failed"), is(0)); + } + } + + @SuppressWarnings("unchecked") + public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableTrue() throws Exception { + setupRolesAndPrivileges(); + setupIndex(); + + configureRemoteCluster(REMOTE_CLUSTER_ALIAS, fulfillingCluster, true, randomBoolean(), true); + + // missing concrete local index is an error + { + String q = Strings.format("FROM nomatch,%s:%s | STATS count(*)", REMOTE_CLUSTER_ALIAS, INDEX2); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString("Unknown index [nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), Matchers.containsString("Unknown index [nomatch]")); + } + + // missing concrete remote index is not fatal when skip_unavailable=true (as long as an index matches on another cluster) + { + String q = Strings.format("FROM %s,%s:nomatch | STATS count(*)", INDEX1, REMOTE_CLUSTER_ALIAS); + + String limit1 = q + " | LIMIT 1"; + Response response = client().performRequest(esqlRequest(limit1)); + assertOK(response); + + Map map = responseAsMap(response); + assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1)); + + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + new ExpectedCluster("(local)", INDEX1, "successful", null), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch", "skipped", 0) + ) + ); + + String 
limit0 = q + " | LIMIT 0"; + response = client().performRequest(esqlRequest(limit0)); + assertOK(response); + + map = responseAsMap(response); + assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + assertThat(((ArrayList) map.get("values")).size(), is(0)); + + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + new ExpectedCluster("(local)", INDEX1, "successful", 0), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch", "skipped", 0) + ) + ); + } + + // since there is at least one matching index in the query, the missing wildcarded local index is not an error + { + String q = Strings.format("FROM nomatch*,%s:%s", REMOTE_CLUSTER_ALIAS, INDEX2); + + String limit1 = q + " | LIMIT 1"; + Response response = client().performRequest(esqlRequest(limit1)); + assertOK(response); + + Map map = responseAsMap(response); + assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1)); + + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster("(local)", "nomatch*", "successful", 0), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, INDEX2, "successful", null) + ) + ); + + String limit0 = q + " | LIMIT 0"; + response = client().performRequest(esqlRequest(limit0)); + assertOK(response); + + map = responseAsMap(response); + assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + assertThat(((ArrayList) map.get("values")).size(), is(0)); + + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster("(local)", "nomatch*", "successful", 0), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, INDEX2, "successful", 0) + ) + ); + } + + // since at least one index of the query matches on some cluster, a wildcarded index on skip_un=true is not an error + { + String q = Strings.format("FROM %s,%s:nomatch*", INDEX1, REMOTE_CLUSTER_ALIAS); + + String limit1 = q + " | LIMIT 1"; + Response response = client().performRequest(esqlRequest(limit1)); + assertOK(response); + + Map map = responseAsMap(response); + assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1)); + + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + new ExpectedCluster("(local)", INDEX1, "successful", null), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch*", "skipped", 0) + ) + ); + + String limit0 = q + " | LIMIT 0"; + response = client().performRequest(esqlRequest(limit0)); + assertOK(response); + + map = responseAsMap(response); + assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + assertThat(((ArrayList) map.get("values")).size(), is(0)); + + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + new ExpectedCluster("(local)", INDEX1, "successful", 0), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch*", "skipped", 0) + ) + ); + } + + // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true + { + // with non-matching concrete index + String q = Strings.format("FROM %s:nomatch", REMOTE_CLUSTER_ALIAS); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> 
client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS))); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS))); + } + + // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true and the + // index was wildcarded + { + String q = Strings.format("FROM %s:nomatch*", REMOTE_CLUSTER_ALIAS); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch*]", REMOTE_CLUSTER_ALIAS))); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch*]", REMOTE_CLUSTER_ALIAS))); + } + + // an error is thrown if there are no matching indices at all + { + String localExpr = randomFrom("nomatch", "nomatch*"); + String remoteExpr = randomFrom("nomatch", "nomatch*"); + String q = Strings.format("FROM %s,%s:%s", localExpr, REMOTE_CLUSTER_ALIAS, remoteExpr); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + } + + // TODO uncomment and test in follow-on PR which does skip_unavailable handling at execution time + // { + // String q = Strings.format("FROM %s,%s:nomatch,%s:%s*", INDEX1, REMOTE_CLUSTER_ALIAS, REMOTE_CLUSTER_ALIAS, INDEX2); + // + // String limit1 = q + " | LIMIT 1"; + // Response response = client().performRequest(esqlRequest(limit1)); + // assertOK(response); + // + // Map map = responseAsMap(response); + // assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + // assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1)); + // + // assertExpectedClustersForMissingIndicesTests(map, + // List.of( + // new ExpectedCluster("(local)", INDEX1, "successful", null), + // new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch," + INDEX2 + "*", "skipped", 0) + // ) + // ); + // + // String limit0 = q + " | LIMIT 0"; + // response = client().performRequest(esqlRequest(limit0)); + // assertOK(response); + // + // map = responseAsMap(response); + // assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + // assertThat(((ArrayList) map.get("values")).size(), is(0)); + // + // assertExpectedClustersForMissingIndicesTests(map, + // List.of( + // new ExpectedCluster("(local)", INDEX1, "successful", 0), + // new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch," + INDEX2 + "*", "skipped", 0) + // ) + // ); + // } + } + + @SuppressWarnings("unchecked") + public void 
testSearchesAgainstNonMatchingIndicesWithSkipUnavailableFalse() throws Exception { + // The remote cluster stays up, but skip_unavailable is set to false, so missing + // remote index expressions are expected to surface as fatal errors. + + setupRolesAndPrivileges(); + setupIndex(); + + configureRemoteCluster(REMOTE_CLUSTER_ALIAS, fulfillingCluster, true, randomBoolean(), false); + + // missing concrete local index is an error + { + String q = Strings.format("FROM nomatch,%s:%s | STATS count(*)", REMOTE_CLUSTER_ALIAS, INDEX2); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString("Unknown index [nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), Matchers.containsString("Unknown index [nomatch]")); + } + + // missing concrete remote index is fatal when skip_unavailable=false, even though an index matches on another cluster + { + String q = Strings.format("FROM %s,%s:nomatch | STATS count(*)", INDEX1, REMOTE_CLUSTER_ALIAS); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS))); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), Matchers.containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS))); + } + + // since there is at least one matching index in the query, the missing wildcarded local index is not an error + { + String q = Strings.format("FROM nomatch*,%s:%s", REMOTE_CLUSTER_ALIAS, INDEX2); + + String limit1 = q + " | LIMIT 1"; + Response response = client().performRequest(esqlRequest(limit1)); + assertOK(response); + + Map<String, Object> map = responseAsMap(response); + assertThat(((ArrayList<?>) map.get("columns")).size(), greaterThanOrEqualTo(1)); + assertThat(((ArrayList<?>) map.get("values")).size(), greaterThanOrEqualTo(1)); + + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster("(local)", "nomatch*", "successful", 0), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, INDEX2, "successful", null) + ) + ); + + String limit0 = q + " | LIMIT 0"; + response = client().performRequest(esqlRequest(limit0)); + assertOK(response); + + map = responseAsMap(response); + assertThat(((ArrayList<?>) map.get("columns")).size(), greaterThanOrEqualTo(1)); + assertThat(((ArrayList<?>) map.get("values")).size(), is(0)); + + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster("(local)", "nomatch*", "successful", 0), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, INDEX2, "successful", 0) + ) + ); + } + + // query is fatal since the remote cluster has skip_unavailable=false and has no matching indices + { + String q = Strings.format("FROM %s,%s:nomatch*", INDEX1, REMOTE_CLUSTER_ALIAS); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(),
containsString(Strings.format("Unknown index [%s:nomatch*]", REMOTE_CLUSTER_ALIAS))); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), Matchers.containsString(Strings.format("Unknown index [%s:nomatch*]", REMOTE_CLUSTER_ALIAS))); + } + + // an error is thrown if there are no matching indices at all + { + // with non-matching concrete index + String q = Strings.format("FROM %s:nomatch", REMOTE_CLUSTER_ALIAS); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS))); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS))); + } + + // an error is thrown if there are no matching indices at all + { + String localExpr = randomFrom("nomatch", "nomatch*"); + String remoteExpr = randomFrom("nomatch", "nomatch*"); + String q = Strings.format("FROM %s,%s:%s", localExpr, REMOTE_CLUSTER_ALIAS, remoteExpr); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + } + + // error since the remote cluster with skip_unavailable=false specified a concrete index that is not found + { + String q = Strings.format("FROM %s,%s:nomatch,%s:%s*", INDEX1, REMOTE_CLUSTER_ALIAS, REMOTE_CLUSTER_ALIAS, INDEX2); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString("no such index [nomatch]")); + assertThat(e.getMessage(), containsString("index_not_found_exception")); + + // TODO: in follow on PR, add support for throwing a VerificationException from this scenario + // String limit0 = q + " | LIMIT 0"; + // e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + // assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS))); + } + } + + private void setupRolesAndPrivileges() throws IOException { + var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); + putUserRequest.setJsonEntity(""" + { + "password": "x-pack-test-password", + "roles" : ["remote_search"] + }"""); + assertOK(adminClient().performRequest(putUserRequest)); + + var putRoleOnRemoteClusterRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleOnRemoteClusterRequest.setJsonEntity(""" + { + "indices": [ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"] + } + ], + "remote_indices":
[ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"], + "clusters": ["my_remote_cluster"] + } + ] + }"""); + assertOK(adminClient().performRequest(putRoleOnRemoteClusterRequest)); + } + + private void setupIndex() throws IOException { + Request createIndex = new Request("PUT", INDEX1); + createIndex.setJsonEntity(""" + { + "mappings": { + "properties": { + "id": { "type": "integer" }, + "score": { "type": "integer" } + } + } + } + """); + assertOK(client().performRequest(createIndex)); + + Request bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": { "_index": "points" } } + { "id": 1, "score": 75} + { "index": { "_index": "points" } } + { "id": 2, "score": 125} + { "index": { "_index": "points" } } + { "id": 3, "score": 100} + { "index": { "_index": "points" } } + { "id": 4, "score": 50} + { "index": { "_index": "points" } } + { "id": 5, "score": 150} + """); + assertOK(client().performRequest(bulkRequest)); + + createIndex = new Request("PUT", INDEX2); + createIndex.setJsonEntity(""" + { + "mappings": { + "properties": { + "num": { "type": "integer" }, + "square": { "type": "integer" } + } + } + } + """); + assertOK(client().performRequest(createIndex)); + + bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": {"_index": "squares"}} + { "num": 1, "square": 1 } + { "index": {"_index": "squares"}} + { "num": 2, "square": 4 } + { "index": {"_index": "squares"}} + { "num": 3, "square": 9 } + { "index": {"_index": "squares"}} + { "num": 4, "square": 16 } + """); + assertOK(performRequestAgainstFulfillingCluster(bulkRequest)); + } + + private Request esqlRequest(String query) throws IOException { + XContentBuilder body = JsonXContent.contentBuilder(); + + body.startObject(); + body.field("query", query); + body.field("include_ccs_metadata", true); + body.endObject(); + + Request request = new Request("POST", "_query"); + request.setJsonEntity(org.elasticsearch.common.Strings.toString(body)); + + return request; + } +} diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java new file mode 100644 index 0000000000000..b6fc43e2a6e48 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java @@ -0,0 +1,286 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.remotecluster; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.greaterThan; + +public class CrossClusterEsqlRCS1UnavailableRemotesIT extends AbstractRemoteClusterSecurityTestCase { + private static final AtomicBoolean SSL_ENABLED_REF = new AtomicBoolean(); + + static { + fulfillingCluster = ElasticsearchCluster.local() + .name("fulfilling-cluster") + .nodes(1) + .module("x-pack-esql") + .module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("remote_cluster.port", "0") + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_server.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key") + .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt") + .setting("xpack.security.authc.token.enabled", "true") + .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password") + .node(0, spec -> spec.setting("remote_cluster_server.enabled", "true")) + .build(); + + queryCluster = ElasticsearchCluster.local() + .name("query-cluster") + .module("x-pack-esql") + .module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_client.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .build(); + } + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster); + + @Before + public void setupPreRequisites() throws IOException { + setupRolesAndPrivileges(); + loadData(); + } + + public void testEsqlRcs1UnavailableRemoteScenarios() throws Exception { + clusterShutDownWithRandomSkipUnavailable(); + remoteClusterShutdownWithSkipUnavailableTrue(); + remoteClusterShutdownWithSkipUnavailableFalse(); + } + + private void clusterShutDownWithRandomSkipUnavailable() throws Exception { + // skip_unavailable is set to a random boolean value. + // However, no clusters are stopped. Hence, we do not expect any other behaviour + // other than a 200-OK. 
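+ // Illustrative sketch only (not part of the assertions below): the esqlRequest(query) helper + // defined in this class builds a POST _query request whose JSON body has the form + // { "query": "FROM *,my_remote_cluster:* | LIMIT 10", "include_ccs_metadata": true } + // and it is the include_ccs_metadata flag that makes the "_clusters" metadata section, + // unpacked by these scenarios, appear in the response.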
+ + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), randomBoolean()); + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = client().performRequest(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + assertThat(columns.size(), is(4)); + assertThat(values.size(), is(9)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(2)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("skipped"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("successful")); + } + + @SuppressWarnings("unchecked") + private void remoteClusterShutdownWithSkipUnavailableTrue() throws Exception { + // Remote cluster is stopped and skip unavailable is set to true. + // We expect no exception and partial results from the remaining open cluster. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), true); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = client().performRequest(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + // Assert results obtained from the local cluster and that remote cluster was + // skipped. 
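+ // Indicative "_clusters" shape for this scenario (values are examples; the exact counts are + // asserted below): { "total": 2, "successful": 1, "skipped": 1, "details": { + // "(local)": { "status": "successful", ... }, "my_remote_cluster": { "status": "skipped", ... } } }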
+ assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + + assertThat(columns.size(), is(2)); + assertThat(values.size(), is(5)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(1)); + assertThat((int) clusters.get("skipped"), is(1)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("skipped")); + + } catch (ResponseException r) { + throw new AssertionError(r); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void remoteClusterShutdownWithSkipUnavailableFalse() throws Exception { + // Remote cluster is stopped and skip_unavailable is set to false. + // Although the other cluster is open, we expect an Exception. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), false); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + ResponseException ex = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(query))); + assertThat(ex.getMessage(), containsString("connect_transport_exception")); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void setupRolesAndPrivileges() throws IOException { + var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); + putUserRequest.setJsonEntity(""" + { + "password": "x-pack-test-password", + "roles" : ["remote_search"] + }"""); + assertOK(adminClient().performRequest(putUserRequest)); + + var putRoleOnRemoteClusterRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleOnRemoteClusterRequest.setJsonEntity(""" + { + "indices": [ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"] + } + ], + "remote_indices": [ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"], + "clusters": ["my_remote_cluster"] + } + ] + }"""); + assertOK(adminClient().performRequest(putRoleOnRemoteClusterRequest)); + } + + private void loadData() throws IOException { + Request createIndex = new Request("PUT", "points"); + createIndex.setJsonEntity(""" + { + "mappings": { + "properties": { + "id": { "type": "integer" }, + "score": { "type": "integer" } + } + } + } + """); + assertOK(client().performRequest(createIndex)); + + Request bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": { "_index": "points" } } + { "id": 1, "score": 75} + { "index": { "_index": "points" } } + { "id": 2, "score": 125} + { "index": { "_index": "points" } } + { "id": 3, "score": 100} + { "index": { "_index": "points" } } + { "id": 4, "score": 50} + { "index": { "_index": "points" } } + { "id": 5, "score": 150} + """); + assertOK(client().performRequest(bulkRequest)); + + createIndex = new Request("PUT", "squares"); + createIndex.setJsonEntity(""" + { + "mappings": { + 
"properties": { + "num": { "type": "integer" }, + "square": { "type": "integer" } + } + } + } + """); + assertOK(performRequestAgainstFulfillingCluster(createIndex)); + + bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": {"_index": "squares"}} + { "num": 1, "square": 1 } + { "index": {"_index": "squares"}} + { "num": 2, "square": 4 } + { "index": {"_index": "squares"}} + { "num": 3, "square": 9 } + { "index": {"_index": "squares"}} + { "num": 4, "square": 16 } + """); + assertOK(performRequestAgainstFulfillingCluster(bulkRequest)); + } + + private Request esqlRequest(String query) throws IOException { + XContentBuilder body = JsonXContent.contentBuilder(); + + body.startObject(); + body.field("query", query); + body.field("include_ccs_metadata", true); + body.endObject(); + + Request request = new Request("POST", "_query"); + request.setJsonEntity(Strings.toString(body)); + + return request; + } +} diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index d5b3141b539eb..74ef6f0dafe63 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -495,7 +495,7 @@ public void testCrossClusterQueryWithRemoteDLSAndFLS() throws Exception { } /** - * Note: invalid_remote is "invalid" because it has a bogus API key and the cluster does not exist (cannot be connected to) + * Note: invalid_remote is "invalid" because it has a bogus API key */ @SuppressWarnings("unchecked") public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { @@ -521,13 +521,19 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { // invalid remote with local index should return local results { var q = "FROM invalid_remote:employees,employees | SORT emp_id DESC | LIMIT 10"; - Response response = performRequestWithRemoteSearchUser(esqlRequest(q)); - // TODO: when skip_unavailable=false for invalid_remote, a fatal exception should be thrown - // this does not yet happen because field-caps returns nothing for this cluster, rather - // than an error, so the current code cannot detect that error. Follow on PR will handle this. - assertLocalOnlyResults(response); + if (skipUnavailable) { + Response response = performRequestWithRemoteSearchUser(esqlRequest(q)); + // this does not yet happen because field-caps returns nothing for this cluster, rather + // than an error, so the current code cannot detect that error. Follow on PR will handle this. 
+ assertLocalOnlyResultsAndSkippedRemote(response); + } else { + // errors from invalid remote should throw an exception if the cluster is marked with skip_unavailable=false + ResponseException error = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(esqlRequest(q))); + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + // TODO: in follow on PR, figure out why this is returning the wrong error - should be "cannot connect to invalid_remote" + assertThat(error.getMessage(), containsString("Unknown index [invalid_remote:employees]")); + } } - { var q = "FROM invalid_remote:employees | SORT emp_id DESC | LIMIT 10"; // errors from invalid remote should be ignored if the cluster is marked with skip_unavailable=true @@ -560,10 +566,9 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { } else { // errors from invalid remote should throw an exception if the cluster is marked with skip_unavailable=false - ResponseException error = expectThrows(ResponseException.class, () -> { - final Response response1 = performRequestWithRemoteSearchUser(esqlRequest(q)); - }); + ResponseException error = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(esqlRequest(q))); assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + // TODO: in follow on PR, figure out why this is returning the wrong error - should be "cannot connect to invalid_remote" assertThat(error.getMessage(), containsString("unable to find apikey")); } } @@ -1049,7 +1054,7 @@ private void assertRemoteOnlyAgainst2IndexResults(Response response) throws IOEx } @SuppressWarnings("unchecked") - private void assertLocalOnlyResults(Response response) throws IOException { + private void assertLocalOnlyResultsAndSkippedRemote(Response response) throws IOException { assertOK(response); Map responseAsMap = entityAsMap(response); List columns = (List) responseAsMap.get("columns"); @@ -1061,6 +1066,34 @@ private void assertLocalOnlyResults(Response response) throws IOException { .collect(Collectors.toList()); // local results assertThat(flatList, containsInAnyOrder("2", "4", "6", "8", "support", "management", "engineering", "marketing")); + Map clusters = (Map) responseAsMap.get("_clusters"); + + /* + clusters map: + {running=0, total=2, details={ + invalid_remote={_shards={total=0, failed=0, successful=0, skipped=0}, took=176, indices=employees, + failures=[{reason={reason=Unable to connect to [invalid_remote], type=connect_transport_exception}, + index=null, shard=-1}], status=skipped}, + (local)={_shards={total=1, failed=0, successful=1, skipped=0}, took=298, indices=employees, status=successful}}, + failed=0, partial=0, successful=1, skipped=1} + */ + + assertThat((int) clusters.get("total"), equalTo(2)); + assertThat((int) clusters.get("successful"), equalTo(1)); + assertThat((int) clusters.get("skipped"), equalTo(1)); + + Map details = (Map) clusters.get("details"); + Map invalidRemoteMap = (Map) details.get("invalid_remote"); + assertThat(invalidRemoteMap.get("status").toString(), equalTo("skipped")); + List failures = (List) invalidRemoteMap.get("failures"); + assertThat(failures.size(), equalTo(1)); + Map failureMap = (Map) failures.get(0); + Map reasonMap = (Map) failureMap.get("reason"); + assertThat(reasonMap.get("reason").toString(), containsString("Unable to connect to [invalid_remote]")); + assertThat(reasonMap.get("type").toString(), containsString("connect_transport_exception")); + + Map localCluster = 
(Map) details.get("(local)"); + assertThat(localCluster.get("status").toString(), equalTo("successful")); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/security/qa/profile/build.gradle b/x-pack/plugin/security/qa/profile/build.gradle index ac821e670fde0..7465ef9917258 100644 --- a/x-pack/plugin/security/qa/profile/build.gradle +++ b/x-pack/plugin/security/qa/profile/build.gradle @@ -7,7 +7,7 @@ dependencies { javaRestTestImplementation project(':x-pack:plugin:security') } -boolean literalUsername = BuildParams.random.nextBoolean() +boolean literalUsername = buildParams.random.nextBoolean() tasks.named("javaRestTest").configure { usesDefaultDistribution() diff --git a/x-pack/plugin/security/qa/security-basic/build.gradle b/x-pack/plugin/security/qa/security-basic/build.gradle index 7684d879671ab..30751705bd75f 100644 --- a/x-pack/plugin/security/qa/security-basic/build.gradle +++ b/x-pack/plugin/security/qa/security-basic/build.gradle @@ -13,7 +13,7 @@ tasks.named('javaRestTest') { } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/security/qa/security-disabled/build.gradle b/x-pack/plugin/security/qa/security-disabled/build.gradle index eba70753c9f28..0a05eae479d33 100644 --- a/x-pack/plugin/security/qa/security-disabled/build.gradle +++ b/x-pack/plugin/security/qa/security-disabled/build.gradle @@ -17,5 +17,5 @@ dependencies { tasks.named("javaRestTest").configure { usesDefaultDistribution() // Test clusters run with security disabled - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/security/qa/tls-basic/build.gradle b/x-pack/plugin/security/qa/tls-basic/build.gradle index fbe91009011e3..e3b51bde45cc8 100644 --- a/x-pack/plugin/security/qa/tls-basic/build.gradle +++ b/x-pack/plugin/security/qa/tls-basic/build.gradle @@ -7,7 +7,7 @@ dependencies { javaRestTestImplementation(testArtifact(project(xpackModule('core')))) } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index c0e55992df88f..5c6c3e8c7933c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -377,7 +377,8 @@ public void testOnIndexModuleIsNoOpWithSecurityDisabled() throws Exception { TestIndexNameExpressionResolver.newInstance(threadPool.getThreadContext()), Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + List.of() ); security.onIndexModule(indexModule); // indexReaderWrapper is a SetOnce so if Security#onIndexModule had already set an ReaderWrapper we would get an exception here diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java index 6b9594c1c68ea..87651a96d75a6 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java @@ -82,8 +82,9 @@ public void testInvalidateTokensWhenIndexUnavailable() throws Exception { when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(false); when(securityIndex.indexExists()).thenReturn(true); when(securityIndex.defensiveCopy()).thenReturn(securityIndex); - when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.PRIMARY_SHARDS)) - .thenReturn(new ElasticsearchException("simulated")); + when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn( + new ElasticsearchException("simulated") + ); final TokenService tokenService = new TokenService( SETTINGS, Clock.systemUTC(), @@ -102,8 +103,11 @@ public void testInvalidateTokensWhenIndexUnavailable() throws Exception { Tuple newTokenBytes = tokenService.getRandomTokenBytes(true); InvalidateTokenRequest request = new InvalidateTokenRequest( - tokenService.prependVersionAndEncodeAccessToken(TransportVersion.current(), newTokenBytes.v1()), - ACCESS_TOKEN.getValue(), null, null); + tokenService.prependVersionAndEncodeAccessToken(TransportVersion.current(), newTokenBytes.v1()), + ACCESS_TOKEN.getValue(), + null, + null + ); PlainActionFuture accessTokenfuture = new PlainActionFuture<>(); action.doExecute(null, request, accessTokenfuture); ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, accessTokenfuture::actionGet); @@ -148,8 +152,11 @@ public void testInvalidateTokensWhenIndexClosed() throws Exception { Tuple newTokenBytes = tokenService.getRandomTokenBytes(true); InvalidateTokenRequest request = new InvalidateTokenRequest( - tokenService.prependVersionAndEncodeAccessToken(TransportVersion.current(), newTokenBytes.v1()), - ACCESS_TOKEN.getValue(), null, null); + tokenService.prependVersionAndEncodeAccessToken(TransportVersion.current(), newTokenBytes.v1()), + ACCESS_TOKEN.getValue(), + null, + null + ); PlainActionFuture accessTokenfuture = new PlainActionFuture<>(); action.doExecute(null, request, accessTokenfuture); ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, accessTokenfuture::actionGet); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index cd6c88cf525af..7b66a95609b05 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -2041,8 +2041,14 @@ public void testExpiredToken() throws Exception { .user(new User("creator")) .realmRef(new RealmRef("test", "test", "test")) .build(false); - tokenService.createOAuth2Tokens(newTokenBytes.v1(), newTokenBytes.v2(), expected, originatingAuth, Collections.emptyMap(), - tokenFuture); + tokenService.createOAuth2Tokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + expected, + originatingAuth, + Collections.emptyMap(), + tokenFuture + ); } String token = tokenFuture.get().getAccessToken(); mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), expected, Map.of(), true, null, client); diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java index 7219561dcf9df..aed39b24f217d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java @@ -76,11 +76,13 @@ public void init() throws Exception { } public void testAuthenticateThrowsOnUnsupportedMinVersions() throws IOException { - when(clusterService.state().getMinTransportVersion()).thenReturn(TransportVersionUtils.randomVersionBetween( + when(clusterService.state().getMinTransportVersion()).thenReturn( + TransportVersionUtils.randomVersionBetween( random(), TransportVersions.MINIMUM_COMPATIBLE, TransportVersionUtils.getPreviousVersion(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) - )); + ) + ); final var authcContext = mock(Authenticator.Context.class, Mockito.RETURNS_DEEP_STUBS); when(authcContext.getThreadContext()).thenReturn(threadContext); final var crossClusterAccessHeaders = new CrossClusterAccessHeaders( @@ -93,12 +95,14 @@ public void testAuthenticateThrowsOnUnsupportedMinVersions() throws IOException when(auditableRequest.exceptionProcessingRequest(any(), any())).thenAnswer( i -> new ElasticsearchSecurityException("potato", (Exception) i.getArguments()[0]) ); - doAnswer(invocationOnMock -> new Authenticator.Context( + doAnswer( + invocationOnMock -> new Authenticator.Context( threadContext, auditableRequest, mock(Realms.class), (AuthenticationToken) invocationOnMock.getArguments()[2] - )).when(authenticationService).newContext(anyString(), any(), any()); + ) + ).when(authenticationService).newContext(anyString(), any(), any()); final PlainActionFuture<Void> future = new PlainActionFuture<>(); crossClusterAccessAuthenticationService.authenticate("action", mock(TransportRequest.class), future); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java index b35a2f8ccc4d3..02f397c23d3bd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java @@ -326,7 +326,7 @@ public void testRecordingFailedAuthenticationMetric() { @SuppressWarnings("unchecked") final ActionListener<AuthenticationResult<User>> listener = (ActionListener<AuthenticationResult<User>>) invocationOnMock .getArguments()[1]; - listener.onResponse(AuthenticationResult.unsuccessful("unsuccessful realms authentication", null)); + listener.onResponse(AuthenticationResult.unsuccessful("unsuccessful realms authentication", null)); return null; }).when(unsuccessfulRealm).authenticate(eq(authenticationToken), any()); @@ -337,7 +337,7 @@ public void testRecordingFailedAuthenticationMetric() { final PlainActionFuture<AuthenticationResult<Authentication>> future = new PlainActionFuture<>(); realmsAuthenticator.authenticate(context, future); - var e = expectThrows(ElasticsearchSecurityException.class, () -> future.actionGet()); + var e = expectThrows(ElasticsearchSecurityException.class, () -> future.actionGet()); assertThat(e, sameInstance(exception)); assertSingleFailedAuthMetric( diff --git
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 73a45dc20ac42..ed3949450cb9f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -363,9 +363,7 @@ public void testGetPrivilegesWillOnlyWaitOnUnavailableShardException() { public void testGetPrivilegesFailsAfterWaitOnUnavailableShardException() { when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(false).thenReturn(false); - when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn( - unavailableShardsException() - ); + when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(unavailableShardsException()); doAnswer(invocation -> { @SuppressWarnings("unchecked") final var listener = (ActionListener) invocation.getArguments()[0]; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java index 5d4ea0f30cb15..8509a6475aa71 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java @@ -58,7 +58,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien } }; FakeRestRequest fakeRestRequest = new FakeRestRequest(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), securityEnabled ? 0 : 1); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, securityEnabled ? 0 : 1); try (var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java index 6ff05faf22d11..b734e602ec291 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java @@ -56,7 +56,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien } }; final var fakeRestRequest = new FakeRestRequest(); - final var fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), requiredSettingsEnabled ? 0 : 1); + final var fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, requiredSettingsEnabled ? 
             0 : 1);
 
         try (var threadPool = createThreadPool()) {
             final var client = new NoOpNodeClient(threadPool);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java
index 9a05230d82ae6..79dba637d53d0 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java
@@ -75,7 +75,7 @@ public void testCreateApiKeyApi() throws Exception {
         ).withParams(Collections.singletonMap("refresh", randomFrom("false", "true", "wait_for"))).build();
 
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java
index 812354986d5bc..a47855731b37a 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java
@@ -115,7 +115,7 @@ public void testLicenseEnforcement() throws Exception {
                 }
             }"""), XContentType.JSON).build();
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java
index d88a217cd0949..c65634a76b532 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java
@@ -91,7 +91,7 @@ public void testGetApiKey() throws Exception {
         final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(params).build();
 
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -159,7 +159,7 @@ public void testGetApiKeyWithProfileUid() throws Exception {
         }
         final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build();
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -224,7 +224,7 @@ public void testGetApiKeyOwnedByCurrentAuthenticatedUser() throws Exception {
         final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build();
 
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java
index ac472378d4874..2cb1b6a66b02b 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java
@@ -77,7 +77,7 @@ public void testInvalidateApiKey() throws Exception {
         ).build();
 
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -144,7 +144,7 @@ public void testInvalidateApiKeyOwnedByCurrentAuthenticatedUser() throws Excepti
         ).build();
 
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java
index d5aa249b1d0f5..7005b5158e626 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java
@@ -110,7 +110,7 @@ public void testQueryParsing() throws Exception {
         ).build();
 
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -184,7 +184,7 @@ public void testAggsAndAggregationsTogether() {
             XContentType.JSON
         ).build();
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -230,7 +230,7 @@ public void testParsingSearchParameters() throws Exception {
         ).build();
 
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -290,7 +290,7 @@ public void testQueryApiKeyWithProfileUid() throws Exception {
         }
         FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build();
         SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java
index ddeffc0675498..6c71f30243eaf 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java
@@ -89,15 +89,12 @@ public void testLicenseEnforcement() throws Exception {
 
         // Disallow by license
         when(licenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(false);
-        final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(
-            new BytesArray("""
-                {
-                  "metadata": {}
-                }"""),
-            XContentType.JSON
-        ).withParams(Map.of("id", randomAlphaOfLength(10))).build();
+        final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(new BytesArray("""
+            {
+              "metadata": {}
+            }"""), XContentType.JSON).withParams(Map.of("id", randomAlphaOfLength(10))).build();
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java
index 2ac33a780313e..bd665560f425f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java
@@ -43,7 +43,7 @@ public class RestGetTokenActionTests extends ESTestCase {
     public void testListenerHandlesExceptionProperly() {
         FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build();
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -67,7 +67,7 @@ public void sendResponse(RestResponse restResponse) {
     public void testSendResponse() {
         FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build();
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -114,7 +114,7 @@ public void sendResponse(RestResponse restResponse) {
     public void testSendResponseKerberosError() {
         FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build();
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java
index 4a593eeb24ac6..38405a2167808 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java
@@ -73,7 +73,7 @@ public void testQueryParsing() throws Exception {
         ).build();
 
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -132,7 +132,7 @@ public void testParsingSearchParameters() throws Exception {
         ).build();
 
         final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
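All of the hunks above make the same mechanical change: the second constructor argument of AbstractRestChannel (the channel's detailed-errors flag) moves from randomBoolean() to a fixed true, so the rendered response no longer varies across test runs. A minimal sketch of the shared pattern, assuming the Elasticsearch test framework is on the classpath; the helper name is illustrative, not part of the change:

    import org.apache.lucene.util.SetOnce;
    import org.elasticsearch.rest.AbstractRestChannel;
    import org.elasticsearch.rest.RestChannel;
    import org.elasticsearch.rest.RestResponse;
    import org.elasticsearch.test.rest.FakeRestRequest;

    public class DeterministicChannelSketch {
        // Returns a channel that captures whatever response a REST handler sends,
        // so the test can assert on it afterwards.
        static RestChannel capturingChannel(FakeRestRequest restRequest, SetOnce<RestResponse> responseSetOnce) {
            // true = detailed errors enabled; fixed instead of randomBoolean() so the
            // response body rendered on failure is identical on every run.
            return new AbstractRestChannel(restRequest, true) {
                @Override
                public void sendResponse(RestResponse restResponse) {
                    responseSetOnce.set(restResponse); // SetOnce trips if a handler responds twice
                }
            };
        }
    }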
diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle
index 45bca88600495..515ffca4a59bf 100644
--- a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle
+++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle
@@ -13,7 +13,7 @@ dependencies {
   javaRestTestImplementation project(':x-pack:qa')
 }
 
-BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName ->
+buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName ->
   tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) {
     usesBwcDistribution(bwcVersion)
     systemProperty("tests.old_cluster_version", bwcVersion)
diff --git a/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle b/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle
index 32cab39f665d3..4c98276abe154 100644
--- a/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle
+++ b/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle
@@ -36,7 +36,7 @@ tasks.register("copyTestNodeKeyMaterial", Copy) {
   into outputDir
 }
 
-BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
+buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
   String oldVersion = bwcVersion.toString()
 
   // SearchableSnapshotsRollingUpgradeIT uses a specific repository to not interfere with other tests
diff --git a/x-pack/plugin/slm/qa/multi-node/build.gradle b/x-pack/plugin/slm/qa/multi-node/build.gradle
index 1f4b0c3b10c30..d6b1fe8a1e219 100644
--- a/x-pack/plugin/slm/qa/multi-node/build.gradle
+++ b/x-pack/plugin/slm/qa/multi-node/build.gradle
@@ -31,7 +31,7 @@ testClusters.configureEach {
   setting 'logger.org.elasticsearch.xpack.slm', 'TRACE'
 }
 
-if (BuildParams.inFipsJvm){
+if (buildParams.inFipsJvm){
   // Test clusters run with security disabled
   tasks.named("javaRestTest").configure{enabled = false }
 }
diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/azure/build.gradle b/x-pack/plugin/snapshot-based-recoveries/qa/azure/build.gradle
index 03426bdddce62..cb2831f0cf273 100644
--- a/x-pack/plugin/snapshot-based-recoveries/qa/azure/build.gradle
+++ b/x-pack/plugin/snapshot-based-recoveries/qa/azure/build.gradle
@@ -39,7 +39,7 @@ tasks.named("javaRestTest").configure {
   systemProperty 'test.azure.container', azureContainer
   systemProperty 'test.azure.key', azureKey
   systemProperty 'test.azure.sas_token', azureSasToken
-  nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_snapshot_based_recoveries_tests_" + BuildParams.testSeed
+  nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_snapshot_based_recoveries_tests_" + buildParams.testSeed
 }
 
 tasks.register("azureThirdPartyTest") {
diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/gcs/build.gradle b/x-pack/plugin/snapshot-based-recoveries/qa/gcs/build.gradle
index 267ed84aa45d4..7550ab8585e13 100644
--- a/x-pack/plugin/snapshot-based-recoveries/qa/gcs/build.gradle
+++ b/x-pack/plugin/snapshot-based-recoveries/qa/gcs/build.gradle
@@ -28,7 +28,7 @@ if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) {
 tasks.named("javaRestTest").configure {
   systemProperty 'test.google.fixture', Boolean.toString(useFixture)
   systemProperty 'test.gcs.bucket', gcsBucket
-  nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_snapshot_based_recoveries_tests" + BuildParams.testSeed
+  nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_snapshot_based_recoveries_tests" + buildParams.testSeed
 
   if (useFixture == false) {
     systemProperty 'test.google.account', serviceAccountFile
diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle b/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle
index b669641363bd1..e676e1f1f2162 100644
--- a/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle
+++ b/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle
@@ -47,7 +47,7 @@ tasks.named("javaRestTest").configure {
   systemProperty("s3AccessKey", s3AccessKey)
   systemProperty("s3SecretKey", s3SecretKey)
   nonInputProperties.systemProperty 'test.s3.base_path',
-    s3BasePath ? s3BasePath + "_snapshot_based_recoveries_tests" + BuildParams.testSeed : 'base_path_integration_tests'
+    s3BasePath ? s3BasePath + "_snapshot_based_recoveries_tests" + buildParams.testSeed : 'base_path_integration_tests'
 }
 
 tasks.register("s3ThirdPartyTest") {
diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/build.gradle
index e304b2ff5c263..af4ed719a9c2f 100644
--- a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/build.gradle
+++ b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/build.gradle
@@ -46,7 +46,7 @@ tasks.named("javaRestTest") {
   systemProperty 'test.azure.sas_token', azureSasToken
   systemProperty 'test.azure.tenant_id', azureTenantId
   systemProperty 'test.azure.client_id', azureClientId
-  nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + BuildParams.testSeed
+  nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + buildParams.testSeed
 }
 
 tasks.register("azureThirdPartyTest") {
@@ -61,7 +61,7 @@ tasks.register("managedIdentityJavaRestTest", RestIntegTestTask) {
   systemProperty 'test.azure.container', azureContainer
   // omitting key and sas_token so that we use a bearer token from the metadata service
   // omitting client_id and tenant_id so that we use a bearer token from the metadata service, not from workload identity
-  nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + BuildParams.testSeed
+  nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + buildParams.testSeed
 }
 
 tasks.register("workloadIdentityJavaRestTest", RestIntegTestTask) {
@@ -74,10 +74,10 @@ tasks.register("workloadIdentityJavaRestTest", RestIntegTestTask) {
   systemProperty 'test.azure.tenant_id', azureTenantId ?: "583d4f71-148a-4163-bad5-2311e13c60dc"
   systemProperty 'test.azure.client_id', azureClientId ?: "86dd1b33-96c1-4a2e-92ac-b844404fc691"
   // omitting key and sas_token so that we use a bearer token from workload identity
-  nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + BuildParams.testSeed
+  nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + buildParams.testSeed
 }
 
-if (BuildParams.inFipsJvm) {
+if (buildParams.inFipsJvm) {
   // Cannot override the trust store in FIPS mode, and these tasks require a HTTPS fixture
   tasks.named("managedIdentityJavaRestTest").configure { enabled = false }
   tasks.named("workloadIdentityJavaRestTest").configure { enabled = false }
diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/build.gradle
index 4f0a1c4faf0af..b7e1036ab3e26 100644
--- a/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/build.gradle
+++ b/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/build.gradle
@@ -36,7 +36,7 @@ if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) {
 tasks.named("javaRestTest").configure {
   systemProperty 'test.google.fixture', Boolean.toString(useFixture)
   systemProperty 'test.gcs.bucket', gcsBucket
-  nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_repository_test_kit_tests" + BuildParams.testSeed
+  nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_repository_test_kit_tests" + buildParams.testSeed
 
   if (useFixture == false) {
     systemProperty 'test.google.account', serviceAccountFile
diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle
index 3fbb55ca4eb3a..14e2b05bc140e 100644
--- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle
+++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle
@@ -34,7 +34,7 @@ dependencies {
 tasks.named("javaRestTest").configure {
   usesDefaultDistribution()
   description = "Runs rest tests against an elasticsearch cluster with HDFS."
-  BuildParams.withFipsEnabledOnly(it)
+  buildParams.withFipsEnabledOnly(it)
   // required for krb5kdc-fixture to work
   jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED'
 }
diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle
index 21cf952f05bf1..313a11f8ce431 100644
--- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle
+++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle
@@ -45,7 +45,7 @@ tasks.named("javaRestTest").configure {
   systemProperty("s3AccessKey", s3AccessKey)
   systemProperty("s3SecretKey", s3SecretKey)
   nonInputProperties.systemProperty 'test.s3.base_path',
-    s3BasePath ? s3BasePath + "_repo_test_kit_tests" + BuildParams.testSeed : 'base_path_integration_tests'
+    s3BasePath ? s3BasePath + "_repo_test_kit_tests" + buildParams.testSeed : 'base_path_integration_tests'
 }
 
 tasks.register("s3ThirdPartyTest") {
diff --git a/x-pack/plugin/spatial/build.gradle b/x-pack/plugin/spatial/build.gradle
index 5bcec68c227ce..4304bae5b9991 100644
--- a/x-pack/plugin/spatial/build.gradle
+++ b/x-pack/plugin/spatial/build.gradle
@@ -28,7 +28,7 @@ testClusters.configureEach {
   setting 'xpack.security.enabled', 'false'
 }
 
-if (BuildParams.isSnapshotBuild() == false) {
+if (buildParams.isSnapshotBuild() == false) {
   tasks.named("test").configure {
     systemProperty 'es.index_mode_feature_flag_registered', 'true'
   }
diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java
index 86575d418e605..1a9eb1fde6c87 100644
--- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java
+++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java
@@ -46,6 +46,7 @@
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperBuilderContext;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.index.mapper.RoutingPathFields;
 import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper;
 import org.elasticsearch.plugins.SearchPlugin;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
@@ -797,12 +798,12 @@ private void assertGeoLine_TSDB(
             ArrayList<GeoPoint> points = testData.pointsForGroup(g);
             ArrayList<Long> timestamps = testData.timestampsForGroup(g);
             for (int i = 0; i < points.size(); i++) {
-                final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null);
-                builder.addString("group_id", testData.groups[g]);
+                var routingFields = new RoutingPathFields(null);
+                routingFields.addString("group_id", testData.groups[g]);
                 ArrayList<IndexableField> fields = new ArrayList<>(
                     Arrays.asList(
                         new SortedDocValuesField("group_id", new BytesRef(testData.groups[g])),
-                        new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildTsidHash().toBytesRef())
+                        new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, routingFields.buildHash().toBytesRef())
                     )
                 );
                 GeoPoint point = points.get(i);
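The GeoLine hunk above swaps TimeSeriesIdFieldMapper.TimeSeriesIdBuilder for RoutingPathFields, with the _tsid doc value now produced by buildHash(). A sketch of the new pattern as it appears in the test; the method and parameter names here are illustrative, and the null constructor argument mirrors the test (no routing builder supplied):

    import org.apache.lucene.document.SortedDocValuesField;
    import org.apache.lucene.index.IndexableField;
    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.index.mapper.RoutingPathFields;
    import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper;

    public class TsidFieldSketch {
        // Build the two doc-values fields a TSDB test document needs for one dimension:
        // the dimension itself, plus the _tsid hash derived from it.
        static IndexableField[] tsidFields(String dimension, String value) {
            var routingFields = new RoutingPathFields(null); // null routing builder, as in the test
            routingFields.addString(dimension, value);
            return new IndexableField[] {
                new SortedDocValuesField(dimension, new BytesRef(value)),
                new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, routingFields.buildHash().toBytesRef())
            };
        }
    }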
diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle
index 85d778f9ec87f..d1dcbc3adbd95 100644
--- a/x-pack/plugin/sql/build.gradle
+++ b/x-pack/plugin/sql/build.gradle
@@ -137,7 +137,7 @@ allprojects {
   }
 }
 
-if (BuildParams.inFipsJvm){
+if (buildParams.inFipsJvm){
   // Test clusters run with security disabled
   tasks.named("internalClusterTest").configure{enabled = false }
 }
diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle
index 138f3e63af462..d1b179f09e403 100644
--- a/x-pack/plugin/sql/jdbc/build.gradle
+++ b/x-pack/plugin/sql/jdbc/build.gradle
@@ -1,6 +1,6 @@
 apply plugin: 'elasticsearch.build'
 apply plugin: 'elasticsearch.publish'
-apply plugin: 'com.github.johnrengelman.shadow'
+apply plugin: 'com.gradleup.shadow'
 
 description = 'JDBC driver for Elasticsearch'
diff --git a/x-pack/plugin/sql/qa/jdbc/build.gradle b/x-pack/plugin/sql/qa/jdbc/build.gradle
index 022306fe9b306..a444399ed28ce 100644
--- a/x-pack/plugin/sql/qa/jdbc/build.gradle
+++ b/x-pack/plugin/sql/qa/jdbc/build.gradle
@@ -72,11 +72,11 @@ subprojects {
 
   // Configure compatibility testing tasks
   // Compatibility testing for JDBC driver started with version 7.9.0
-  BuildParams.bwcVersions.indexCompatible.findAll({ it.onOrAfter(Version.fromString("7.9.0")) && it != VersionProperties.elasticsearchVersion }).each { bwcVersion ->
+  buildParams.bwcVersions.indexCompatible.findAll({ it.onOrAfter(Version.fromString("7.9.0")) && it != VersionProperties.elasticsearchVersion }).each { bwcVersion ->
     def baseName = "v${bwcVersion}"
     def cluster = testClusters.register(baseName)
 
-    UnreleasedVersionInfo unreleasedVersion = BuildParams.bwcVersions.unreleasedInfo(bwcVersion)
+    UnreleasedVersionInfo unreleasedVersion = buildParams.bwcVersions.unreleasedInfo(bwcVersion)
     Configuration driverConfiguration = configurations.create("jdbcDriver${baseName}") {
       // TODO: Temporary workaround for https://github.com/elastic/elasticsearch/issues/73433
       transitive = false
diff --git a/x-pack/plugin/sql/qa/jdbc/security/with-ssl/build.gradle b/x-pack/plugin/sql/qa/jdbc/security/with-ssl/build.gradle
index ec88fcffa941c..971c7bf319244 100644
--- a/x-pack/plugin/sql/qa/jdbc/security/with-ssl/build.gradle
+++ b/x-pack/plugin/sql/qa/jdbc/security/with-ssl/build.gradle
@@ -11,5 +11,5 @@ testClusters.configureEach {
 // JDBC client can only be configured for SSL with keystores, but we can't use JKS/PKCS12 keystores in FIPS 140-2 mode.
 tasks.withType(Test).configureEach {
-  BuildParams.withFipsEnabledOnly(it)
+  buildParams.withFipsEnabledOnly(it)
 }
diff --git a/x-pack/plugin/sql/qa/mixed-node/build.gradle b/x-pack/plugin/sql/qa/mixed-node/build.gradle
index 412dec62f81f8..06e3b61d5b303 100644
--- a/x-pack/plugin/sql/qa/mixed-node/build.gradle
+++ b/x-pack/plugin/sql/qa/mixed-node/build.gradle
@@ -19,7 +19,7 @@ testClusters.configureEach {
 tasks.named("javaRestTest").configure{ enabled = false}
 
 // A bug (https://github.com/elastic/elasticsearch/issues/68439) limits us to perform tests with versions from 7.10.3 onwards
-BuildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.3") &&
+buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.3") &&
   v != VersionProperties.getElasticsearchVersion()) { bwcVersion, baseName ->
 
   def baseCluster = testClusters.register(baseName) {
diff --git a/x-pack/plugin/sql/qa/server/security/with-ssl/build.gradle b/x-pack/plugin/sql/qa/server/security/with-ssl/build.gradle
index 907d72e606bda..51a3f83a909af 100644
--- a/x-pack/plugin/sql/qa/server/security/with-ssl/build.gradle
+++ b/x-pack/plugin/sql/qa/server/security/with-ssl/build.gradle
@@ -6,7 +6,7 @@ tasks.named("javaRestTest").configure {
   // Do not attempt to form a cluster in a FIPS JVM, as doing so with a JKS keystore will fail.
   // TODO Revisit this when SQL CLI client can handle key/certificate instead of only Keystores.
   // https://github.com/elastic/elasticsearch/issues/32306
-  BuildParams.withFipsEnabledOnly(it)
+  buildParams.withFipsEnabledOnly(it)
 }
 
 testClusters.matching { it.name == "javaRestTest" }.configureEach {
diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle
index b9713bcb8e7a3..cd24dcc15c863 100644
--- a/x-pack/plugin/sql/sql-cli/build.gradle
+++ b/x-pack/plugin/sql/sql-cli/build.gradle
@@ -8,7 +8,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams
  */
 
 apply plugin: 'elasticsearch.build'
-apply plugin: 'com.github.johnrengelman.shadow'
+apply plugin: 'com.gradleup.shadow'
 
 /* We don't use the 'application' plugin because it builds a zip and tgz which
  * we don't want. */
@@ -55,7 +55,7 @@ tasks.register("runcli") {
   description = 'Run the CLI and connect to elasticsearch running on 9200'
   dependsOn "shadowJar"
   doLast {
-    List command = ["${BuildParams.runtimeJavaHome}/bin/java"]
+    List command = ["${buildParams.runtimeJavaHome.get()}/bin/java"]
     if ('true'.equals(providers.systemProperty('debug').orElse('false').get())) {
       command += '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000'
     }
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml
index ea7684fb69a09..9fbe69ac05f0a 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml
@@ -234,3 +234,58 @@
   - match: { values.2.1: "2024-08-01T00:00:00.000Z" }
   - match: { values.3.0: 1 }
   - match: { values.3.1: "2024-09-01T00:00:00.000Z" }
+
+---
+"Datetime interval as string":
+  - requires:
+      test_runner_features: [allowed_warnings_regex, capabilities]
+      capabilities:
+        - method: POST
+          path: /_query
+          parameters: [ ]
+          capabilities: [ implicit_casting_string_literal_to_temporal_amount ]
+      reason: "interval in parameters as string"
+
+  - do:
+      indices.create:
+        index: test_bucket
+        body:
+          mappings:
+            properties:
+              ts :
+                type : date
+
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - { "index": { "_index": "test_bucket" } }
+          - { "ts": "2024-06-16" }
+          - { "index": { "_index": "test_bucket" } }
+          - { "ts": "2024-07-16" }
+          - { "index": { "_index": "test_bucket" } }
+          - { "ts": "2024-08-16" }
+          - { "index": { "_index": "test_bucket" } }
+          - { "ts": "2024-09-16" }
+
+  - do:
+      allowed_warnings_regex:
+        - "No limit defined, adding default limit of \\[.*\\]"
+      esql.query:
+        body:
+          query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, ?bucket) | SORT b'
+          params: [{"bucket" : "1 month"}]
+
+  - match: { columns.0.name: c }
+  - match: { columns.0.type: long }
+  - match: { columns.1.name: b }
+  - match: { columns.1.type: date }
+  - length: { values: 4 }
+  - match: { values.0.0: 1 }
+  - match: { values.0.1: "2024-06-01T00:00:00.000Z" }
+  - match: { values.1.0: 1 }
+  - match: { values.1.1: "2024-07-01T00:00:00.000Z" }
+  - match: { values.2.0: 1 }
+  - match: { values.2.1: "2024-08-01T00:00:00.000Z" }
+  - match: { values.3.0: 1 }
+  - match: { values.3.1: "2024-09-01T00:00:00.000Z" }
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
index 6e7098da33805..4c3b16c5dc309 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
@@ -30,7 +30,7 @@ setup:
       - method: POST
         path: /_query
         parameters: []
-        capabilities: [ snapshot_test_for_telemetry, fn_bit_length ]
+        capabilities: [ snapshot_test_for_telemetry, fn_byte_length ]
       reason: "Test that should only be executed on snapshot versions"
 
   - do: {xpack.usage: {}}
@@ -91,7 +91,8 @@ setup:
   - match: {esql.functions.cos: $functions_cos}
   - gt: {esql.functions.to_long: $functions_to_long}
   - match: {esql.functions.coalesce: $functions_coalesce}
-  - length: {esql.functions: 119} # check the "sister" test below for a likely update to the same esql.functions length check
+  # Testing for the entire function set isn't feasible, so we just check that we return the correct count as an approximation.
+  - length: {esql.functions: 120} # check the "sister" test below for a likely update to the same esql.functions length check
 
 ---
 "Basic ESQL usage output (telemetry) non-snapshot version":
@@ -101,7 +102,7 @@ setup:
       - method: POST
         path: /_query
         parameters: []
-        capabilities: [ non_snapshot_test_for_telemetry, fn_bit_length ]
+        capabilities: [ non_snapshot_test_for_telemetry, fn_byte_length ]
       reason: "Test that should only be executed on release versions"
 
   - do: {xpack.usage: {}}
@@ -162,4 +163,4 @@ setup:
   - match: {esql.functions.cos: $functions_cos}
   - gt: {esql.functions.to_long: $functions_to_long}
   - match: {esql.functions.coalesce: $functions_coalesce}
-  - length: {esql.functions: 116} # check the "sister" test above for a likely update to the same esql.functions length check
+  - length: {esql.functions: 117} # check the "sister" test above for a likely update to the same esql.functions length check
diff --git a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle
index b429e123bb631..eb0551a4d10e1 100644
--- a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle
+++ b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle
@@ -12,7 +12,7 @@ dependencies {
   testImplementation project(':x-pack:qa')
 }
 
-Version ccsCompatVersion = BuildParams.bwcVersions.minimumWireCompatibleVersion
+Version ccsCompatVersion = buildParams.bwcVersions.minimumWireCompatibleVersion
 
 restResources {
   restApi {
diff --git a/x-pack/plugin/watcher/qa/rest/build.gradle b/x-pack/plugin/watcher/qa/rest/build.gradle
index a911c022212b2..8382a71092720 100644
--- a/x-pack/plugin/watcher/qa/rest/build.gradle
+++ b/x-pack/plugin/watcher/qa/rest/build.gradle
@@ -29,7 +29,7 @@ testClusters.configureEach {
   setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG'
 }
 
-if (BuildParams.inFipsJvm){
+if (buildParams.inFipsJvm){
   // Test clusters run with security disabled
   tasks.named("javaRestTest").configure{enabled = false }
   tasks.named("yamlRestTest").configure{enabled = false }
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java
index d11cb7521976a..a979d614fe38f 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java
@@ -26,7 +26,9 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
 import java.util.function.Predicate;
@@ -56,9 +58,72 @@ public class EmailService extends NotificationService<Account> {
         (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope)
     );
 
+    private static final List<String> ALLOW_ALL_DEFAULT = List.of("*");
+
     private static final Setting<List<String>> SETTING_DOMAIN_ALLOWLIST = Setting.stringListSetting(
         "xpack.notification.email.account.domain_allowlist",
-        List.of("*"),
+        ALLOW_ALL_DEFAULT,
+        new Setting.Validator<>() {
+            @Override
+            public void validate(List<String> value) {
+                // Ignored
+            }
+
+            @Override
+            @SuppressWarnings("unchecked")
+            public void validate(List<String> value, Map<Setting<?>, Object> settings) {
+                List<String> recipientAllowPatterns = (List<String>) settings.get(SETTING_RECIPIENT_ALLOW_PATTERNS);
+                if (value.equals(ALLOW_ALL_DEFAULT) == false && recipientAllowPatterns.equals(ALLOW_ALL_DEFAULT) == false) {
+                    throw new IllegalArgumentException(
+                        "Cannot set both ["
+                            + SETTING_RECIPIENT_ALLOW_PATTERNS.getKey()
+                            + "] and ["
+                            + SETTING_DOMAIN_ALLOWLIST.getKey()
+                            + "] to a non [\"*\"] value at the same time."
+                    );
+                }
+            }
+
+            @Override
+            public Iterator<Setting<?>> settings() {
+                List<Setting<?>> settingRecipientAllowPatterns = List.of(SETTING_RECIPIENT_ALLOW_PATTERNS);
+                return settingRecipientAllowPatterns.iterator();
+            }
+        },
+        Property.Dynamic,
+        Property.NodeScope
+    );
+
+    private static final Setting<List<String>> SETTING_RECIPIENT_ALLOW_PATTERNS = Setting.stringListSetting(
+        "xpack.notification.email.recipient_allowlist",
+        ALLOW_ALL_DEFAULT,
+        new Setting.Validator<>() {
+            @Override
+            public void validate(List<String> value) {
+                // Ignored
+            }
+
+            @Override
+            @SuppressWarnings("unchecked")
+            public void validate(List<String> value, Map<Setting<?>, Object> settings) {
+                List<String> domainAllowList = (List<String>) settings.get(SETTING_DOMAIN_ALLOWLIST);
+                if (value.equals(ALLOW_ALL_DEFAULT) == false && domainAllowList.equals(ALLOW_ALL_DEFAULT) == false) {
+                    throw new IllegalArgumentException(
+                        "Cannot set both ["
+                            + SETTING_RECIPIENT_ALLOW_PATTERNS.getKey()
+                            + "] and ["
+                            + SETTING_DOMAIN_ALLOWLIST.getKey()
+                            + "] to a non [\"*\"] value at the same time."
+                    );
+                }
+            }
+
+            @Override
+            public Iterator<Setting<?>> settings() {
+                List<Setting<?>> settingDomainAllowlist = List.of(SETTING_DOMAIN_ALLOWLIST);
+                return settingDomainAllowlist.iterator();
+            }
+        },
         Property.Dynamic,
         Property.NodeScope
     );
@@ -167,6 +232,7 @@ public class EmailService extends NotificationService<Account> {
     private final CryptoService cryptoService;
     private final SSLService sslService;
     private volatile Set<String> allowedDomains;
+    private volatile Set<String> allowedRecipientPatterns;
 
     @SuppressWarnings("this-escape")
     public EmailService(Settings settings, @Nullable CryptoService cryptoService, SSLService sslService, ClusterSettings clusterSettings) {
@@ -192,7 +258,9 @@ public EmailService(Settings settings, @Nullable CryptoService cryptoService, SS
         clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_SEND_PARTIAL, (s, o) -> {}, (s, o) -> {});
         clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_WAIT_ON_QUIT, (s, o) -> {}, (s, o) -> {});
         this.allowedDomains = new HashSet<>(SETTING_DOMAIN_ALLOWLIST.get(settings));
+        this.allowedRecipientPatterns = new HashSet<>(SETTING_RECIPIENT_ALLOW_PATTERNS.get(settings));
         clusterSettings.addSettingsUpdateConsumer(SETTING_DOMAIN_ALLOWLIST, this::updateAllowedDomains);
+        clusterSettings.addSettingsUpdateConsumer(SETTING_RECIPIENT_ALLOW_PATTERNS, this::updateAllowedRecipientPatterns);
         // do an initial load
         reload(settings);
     }
@@ -201,6 +269,10 @@ void updateAllowedDomains(List<String> newDomains) {
         this.allowedDomains = new HashSet<>(newDomains);
     }
 
+    void updateAllowedRecipientPatterns(List<String> newPatterns) {
+        this.allowedRecipientPatterns = new HashSet<>(newPatterns);
+    }
+
     @Override
     protected Account createAccount(String name, Settings accountSettings) {
         Account.Config config = new Account.Config(name, accountSettings, getSmtpSslSocketFactory(), logger);
@@ -228,33 +300,47 @@ public EmailSent send(Email email, Authentication auth, Profile profile, String
                 "failed to send email with subject ["
                     + email.subject()
                     + "] and recipient domains "
-                    + getRecipientDomains(email)
+                    + getRecipients(email, true)
                     + ", one or more recipients is not specified in the domain allow list setting ["
                     + SETTING_DOMAIN_ALLOWLIST.getKey()
                     + "]."
             );
         }
+        if (recipientAddressInAllowList(email, this.allowedRecipientPatterns) == false) {
+            throw new IllegalArgumentException(
+                "failed to send email with subject ["
+                    + email.subject()
+                    + "] and recipients "
+                    + getRecipients(email, false)
+                    + ", one or more recipients is not specified in the domain allow list setting ["
+                    + SETTING_RECIPIENT_ALLOW_PATTERNS.getKey()
+                    + "]."
+            );
+        }
         return send(email, auth, profile, account);
     }
 
     // Visible for testing
-    static Set<String> getRecipientDomains(Email email) {
-        return Stream.concat(
+    static Set<String> getRecipients(Email email, boolean domainsOnly) {
+        var stream = Stream.concat(
             Optional.ofNullable(email.to()).map(addrs -> Arrays.stream(addrs.toArray())).orElse(Stream.empty()),
             Stream.concat(
                 Optional.ofNullable(email.cc()).map(addrs -> Arrays.stream(addrs.toArray())).orElse(Stream.empty()),
                 Optional.ofNullable(email.bcc()).map(addrs -> Arrays.stream(addrs.toArray())).orElse(Stream.empty())
             )
-        )
-            .map(InternetAddress::getAddress)
-            // Pull out only the domain of the email address, so foo@bar.com -> bar.com
-            .map(emailAddress -> emailAddress.substring(emailAddress.lastIndexOf('@') + 1))
-            .collect(Collectors.toSet());
+        ).map(InternetAddress::getAddress);
+
+        if (domainsOnly) {
+            // Pull out only the domain of the email address, so foo@bar.com becomes bar.com
+            stream = stream.map(emailAddress -> emailAddress.substring(emailAddress.lastIndexOf('@') + 1));
+        }
+
+        return stream.collect(Collectors.toSet());
     }
 
     // Visible for testing
     static boolean recipientDomainsInAllowList(Email email, Set<String> allowedDomainSet) {
-        if (allowedDomainSet.size() == 0) {
+        if (allowedDomainSet.isEmpty()) {
             // Nothing is allowed
             return false;
         }
@@ -262,12 +348,29 @@ static boolean recipientDomainsInAllowList(Email email, Set<String> allowedDomai
             // Don't bother checking, because there is a wildcard all
             return true;
         }
-        final Set<String> domains = getRecipientDomains(email);
+        final Set<String> domains = getRecipients(email, true);
         final Predicate<String> matchesAnyAllowedDomain = domain -> allowedDomainSet.stream()
             .anyMatch(allowedDomain -> Regex.simpleMatch(allowedDomain, domain, true));
         return domains.stream().allMatch(matchesAnyAllowedDomain);
     }
 
+    // Visible for testing
+    static boolean recipientAddressInAllowList(Email email, Set<String> allowedRecipientPatterns) {
+        if (allowedRecipientPatterns.isEmpty()) {
+            // Nothing is allowed
+            return false;
+        }
+        if (allowedRecipientPatterns.contains("*")) {
+            // Don't bother checking, because there is a wildcard all
+            return true;
+        }
+
+        final Set<String> recipients = getRecipients(email, false);
+        final Predicate<String> matchesAnyAllowedRecipient = recipient -> allowedRecipientPatterns.stream()
+            .anyMatch(pattern -> Regex.simpleMatch(pattern, recipient, true));
+        return recipients.stream().allMatch(matchesAnyAllowedRecipient);
+    }
+
     private static EmailSent send(Email email, Authentication auth, Profile profile, Account account) throws MessagingException {
         assert account != null;
         try {
@@ -304,6 +407,7 @@ private static List<Setting<?>> getDynamicSettings() {
         return Arrays.asList(
             SETTING_DEFAULT_ACCOUNT,
             SETTING_DOMAIN_ALLOWLIST,
+            SETTING_RECIPIENT_ALLOW_PATTERNS,
             SETTING_PROFILE,
             SETTING_EMAIL_DEFAULTS,
             SETTING_SMTP_AUTH,
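The new xpack.notification.email.recipient_allowlist setting mirrors the existing domain_allowlist: an empty list allows nothing, a lone "*" allows everything, and otherwise every recipient (to, cc, and bcc) must match at least one case-insensitive wildcard pattern; the two settings cross-validate so only one can be non-default. A standalone sketch of that acceptance rule in plain JDK Java, not the ES implementation (which delegates to Regex.simpleMatch); the class and method names are illustrative:

    import java.util.Set;
    import java.util.regex.Pattern;

    public final class AllowlistSketch {
        // Case-insensitive match of a simple wildcard pattern such as "*@bar.com".
        static boolean simpleMatch(String pattern, String value) {
            String regex = "\\Q" + pattern.replace("*", "\\E.*\\Q") + "\\E";
            return Pattern.compile(regex, Pattern.CASE_INSENSITIVE).matcher(value).matches();
        }

        static boolean allAllowed(Set<String> recipients, Set<String> patterns) {
            if (patterns.isEmpty()) return false;    // nothing is allowed
            if (patterns.contains("*")) return true; // wildcard-all short-circuits
            // Every recipient must match at least one allowlist pattern.
            return recipients.stream().allMatch(r -> patterns.stream().anyMatch(p -> simpleMatch(p, r)));
        }

        public static void main(String[] args) {
            System.out.println(allAllowed(Set.of("foo@bar.com"), Set.of("*@BAR.com")));                  // true
            System.out.println(allAllowed(Set.of("foo@bar.com", "x@evil.com"), Set.of("*@bar.com")));    // false
        }
    }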
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportUpdateWatcherSettingsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportUpdateWatcherSettingsAction.java
index 0407c2db63ac6..f2c43a91bce74 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportUpdateWatcherSettingsAction.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportUpdateWatcherSettingsAction.java
@@ -68,7 +68,7 @@ public TransportUpdateWatcherSettingsAction(
             clusterService,
             threadPool,
             actionFilters,
-            UpdateWatcherSettingsAction.Request::readFrom,
+            UpdateWatcherSettingsAction.Request::new,
             indexNameExpressionResolver,
             AcknowledgedResponse::readFrom,
             EsExecutors.DIRECT_EXECUTOR_SERVICE
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java
index 70896a67a9468..e8d6a2868a496 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java
@@ -70,7 +70,8 @@ public void testWatcherDisabledTests() throws Exception {
             TestIndexNameExpressionResolver.newInstance(),
             Collections.emptyMap(),
             mock(SlowLogFieldProvider.class),
-            MapperMetrics.NOOP
+            MapperMetrics.NOOP,
+            List.of()
         );
         // this will trip an assertion if the watcher indexing operation listener is null (which it is) but we try to add it
         watcher.onIndexModule(indexModule);
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java
index a0ce8b18d8a96..4a668d0f9817a 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java
@@ -69,6 +69,31 @@ public void testSend() throws Exception {
         assertThat(sent.account(), is("account1"));
     }
 
+    public void testDomainAndRecipientAllowCantBeSetAtSameTime() {
+        Settings settings = Settings.builder()
+            .putList("xpack.notification.email.account.domain_allowlist", "bar.com")
+            .putList("xpack.notification.email.recipient_allowlist", "*-user@potato.com")
+            .build();
+
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            () -> new EmailService(
+                settings,
+                null,
+                mock(SSLService.class),
+                new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))
+            )
+        );
+
+        assertThat(
+            e.getMessage(),
+            containsString(
+                "Cannot set both [xpack.notification.email.recipient_allowlist] and "
+                    + "[xpack.notification.email.account.domain_allowlist] to a non [\"*\"] value at the same time."
+ ) + ); + } + public void testAccountSmtpPropertyConfiguration() { Settings settings = Settings.builder() .put("xpack.notification.email.account.account1.smtp.host", "localhost") @@ -140,7 +165,7 @@ public void testExtractDomains() throws Exception { Collections.emptyMap() ); assertThat( - EmailService.getRecipientDomains(email), + EmailService.getRecipients(email, true), containsInAnyOrder("bar.com", "eggplant.com", "example.com", "another.com", "bcc.com") ); @@ -158,7 +183,7 @@ public void testExtractDomains() throws Exception { "htmlbody", Collections.emptyMap() ); - assertThat(EmailService.getRecipientDomains(email), containsInAnyOrder("bar.com", "eggplant.com", "example.com")); + assertThat(EmailService.getRecipients(email, true), containsInAnyOrder("bar.com", "eggplant.com", "example.com")); } public void testAllowedDomain() throws Exception { @@ -322,6 +347,264 @@ public void testChangeDomainAllowListSetting() throws UnsupportedEncodingExcepti assertThat(e2.getMessage(), containsString("port out of range")); } + public void testRecipientAddressInAllowList_EmptyAllowedPatterns() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of(); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(false)); + } + + public void testRecipientAddressInAllowList_WildcardPattern() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("*"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_SpecificPattern() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("foo@bar.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(false)); + } + + public void testRecipientAddressInAllowList_MultiplePatterns() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("foo@bar.com", "baz@potato.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_MixedCasePatterns() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("FOO@BAR.COM", "BAZ@POTATO.COM"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_PartialWildcardPrefixPattern() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("foo@*", "baz@*"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_PartialWildcardSuffixPattern() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("*@bar.com", "*@potato.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_DisallowedCCAddressesFails() throws UnsupportedEncodingException { + Email email = new Email( + "id", + new Email.Address("sender@domain.com", "Sender"), + createAddressList("foo@bar.com"), + randomFrom(Email.Priority.values()), + 
ZonedDateTime.now(), + createAddressList("foo@bar.com"), + createAddressList("cc@allowed.com", "cc@notallowed.com"), + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + Set allowedPatterns = Set.of("foo@bar.com", "cc@allowed.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(false)); + } + + public void testRecipientAddressInAllowList_DisallowedBCCAddressesFails() throws UnsupportedEncodingException { + Email email = new Email( + "id", + new Email.Address("sender@domain.com", "Sender"), + createAddressList("foo@bar.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + null, + createAddressList("bcc@allowed.com", "bcc@notallowed.com"), + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + Set allowedPatterns = Set.of("foo@bar.com", "bcc@allowed.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(false)); + } + + public void testAllowedRecipient() throws Exception { + Email email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + null, + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("*"))); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of())); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of(""))); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("foo@other.com", "*o@bar.com"))); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("buzz@other.com", "*.com"))); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("*.CoM"))); + + // Invalid email in CC doesn't blow up + email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + createAddressList("badEmail"), + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of("*@other.com", "*iii@bar.com"))); + + // Check CC + email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + createAddressList("thing@other.com"), + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("*@other.com", "*@bar.com"))); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of("*oo@bar.com"))); + + // Check BCC + email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. 
Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + null, + createAddressList("thing@other.com"), + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("*@other.com", "*@bar.com"))); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of("*oo@bar.com"))); + } + + public void testSendEmailWithRecipientNotInAllowList() throws Exception { + service.updateAllowedRecipientPatterns(Collections.singletonList(randomFrom("*@bar.*", "*@bar.com", "*b*"))); + Email email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com", "non-whitelisted@invalid.com"), + null, + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + when(account.name()).thenReturn("account1"); + Authentication auth = new Authentication("user", new Secret("passwd".toCharArray())); + Profile profile = randomFrom(Profile.values()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.send(email, auth, profile, "account1")); + assertThat( + e.getMessage(), + containsString( + "failed to send email with subject [subject] and recipients [non-whitelisted@invalid.com, foo@bar.com], " + + "one or more recipients is not specified in the domain allow list setting " + + "[xpack.notification.email.recipient_allowlist]." + ) + ); + } + + public void testChangeRecipientAllowListSetting() throws UnsupportedEncodingException, MessagingException { + Settings settings = Settings.builder() + .put("xpack.notification.email.account.account1.foo", "bar") + // Setting a random SMTP server name and an invalid port so that sending emails is guaranteed to fail: + .put("xpack.notification.email.account.account1.smtp.host", randomAlphaOfLength(10)) + .put("xpack.notification.email.account.account1.smtp.port", -100) + .putList("xpack.notification.email.recipient_allowlist", "*oo@bar.com") + .build(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())); + EmailService emailService = new EmailService(settings, null, mock(SSLService.class), clusterSettings); + Email email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com", "non-whitelisted@invalid.com"), + null, + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + when(account.name()).thenReturn("account1"); + Authentication auth = new Authentication("user", new Secret("passwd".toCharArray())); + Profile profile = randomFrom(Profile.values()); + + // This send will fail because one of the recipients ("non-whitelisted@invalid.com") is in a domain that is not in the allowed list + IllegalArgumentException e1 = expectThrows( + IllegalArgumentException.class, + () -> emailService.send(email, auth, profile, "account1") + ); + assertThat( + e1.getMessage(), + containsString( + "failed to send email with subject [subject] and recipients [non-whitelisted@invalid.com, foo@bar.com], " + + "one or more recipients is not specified in the domain allow list setting " + + "[xpack.notification.email.recipient_allowlist]." 
+ ) + ); + + // Now dynamically add "invalid.com" to the list of allowed domains: + Settings newSettings = Settings.builder() + .putList("xpack.notification.email.recipient_allowlist", "*@bar.com", "*@invalid.com") + .build(); + clusterSettings.applySettings(newSettings); + // Still expect an exception because we're not actually sending the email, but it's no longer because the domain isn't allowed: + IllegalArgumentException e2 = expectThrows( + IllegalArgumentException.class, + () -> emailService.send(email, auth, profile, "account1") + ); + assertThat(e2.getMessage(), containsString("port out of range")); + } + + private Email createTestEmail(String... recipients) throws UnsupportedEncodingException { + return new Email( + "id", + new Email.Address("sender@domain.com", "Sender"), + createAddressList(recipients), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList(recipients), + null, + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + } + private static Email.AddressList createAddressList(String... emails) throws UnsupportedEncodingException { List addresses = new ArrayList<>(); for (String email : emails) { diff --git a/x-pack/plugin/wildcard/build.gradle b/x-pack/plugin/wildcard/build.gradle index 2bcf0db057aa5..b582f3fcea903 100644 --- a/x-pack/plugin/wildcard/build.gradle +++ b/x-pack/plugin/wildcard/build.gradle @@ -20,7 +20,7 @@ dependencies { testImplementation(testArtifact(project(xpackModule('core')))) } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/x-pack/qa/core-rest-tests-with-security/build.gradle b/x-pack/qa/core-rest-tests-with-security/build.gradle index 0b8e459ed231b..8a67a2c1dde0d 100644 --- a/x-pack/qa/core-rest-tests-with-security/build.gradle +++ b/x-pack/qa/core-rest-tests-with-security/build.gradle @@ -27,7 +27,7 @@ tasks.named("yamlRestTest").configure { 'index/10_with_id/Index with ID', 'indices.get_alias/10_basic/Get alias against closed indices' ]; - if (BuildParams.isSnapshotBuild() == false) { + if (buildParams.isSnapshotBuild() == false) { blacklist += [ 'synonyms_privileges/10_synonyms_with_privileges/*', 'synonyms_privileges/20_synonyms_no_privileges/*' diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 7248d1b0a6bfb..d6b05242f613b 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -15,7 +15,7 @@ dependencies { } -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java deleted file mode 100644 index fee6910fcf6c0..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.restart; - -import com.carrotsearch.randomizedtesting.annotations.Name; - -import org.apache.http.util.EntityUtils; -import org.elasticsearch.Build; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; -import org.junit.Before; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.not; - -@UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) -// Remove the whole test suite (superseded by SystemIndexMappingUpdateServiceIT#testSystemIndexManagerUpgradesMappings) -public class WatcherMappingUpdateIT extends AbstractXpackFullClusterRestartTestCase { - - public WatcherMappingUpdateIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - - @Before - public void setup() { - // This test is superseded by SystemIndexMappingUpdateServiceIT#testSystemIndexManagerUpgradesMappings for newer versions - assumeFalse( - "Starting from 8.11, the mappings upgrade service uses mappings versions instead of node versions", - clusterHasFeature(RestTestLegacyFeatures.MAPPINGS_UPGRADE_SERVICE_USES_MAPPINGS_VERSION) - ); - } - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - - public void testMappingsAreUpdated() throws Exception { - if (isRunningAgainstOldCluster()) { - // post a watch - Request putWatchRequest = new Request("PUT", "_watcher/watch/log_error_watch"); - putWatchRequest.setJsonEntity(""" - { - "trigger" : { - "schedule" : { "interval" : "10s" } - }, - "input" : { - "search" : { - "request" : { - "indices" : [ "logs" ], - "body" : { - "query" : { - "match" : { "message": "error" } - } - } - } - } - } - } - """); - client().performRequest(putWatchRequest); - - assertMappingVersion(".watches", getOldClusterVersion()); - } else { - assertMappingVersion(".watches", Build.current().version()); - } - } - - private void assertMappingVersion(String index, String clusterVersion) throws Exception { - assertBusy(() -> { - Request mappingRequest = new Request("GET", index + "/_mappings"); - mappingRequest.setOptions(getWarningHandlerOptions(index)); - Response response = client().performRequest(mappingRequest); - String responseBody = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); - assertThat(responseBody, containsString("\"version\":\"" + clusterVersion + "\"")); - }, 60L, TimeUnit.SECONDS); - } - - private void assertNoMappingVersion(String index) throws Exception { - assertBusy(() -> { - Request mappingRequest = new Request("GET", index + "/_mappings"); - assert isRunningAgainstOldCluster(); - mappingRequest.setOptions(getWarningHandlerOptions(index)); - Response response = client().performRequest(mappingRequest); - String responseBody = 
-            assertThat(responseBody, not(containsString("\"version\":\"")));
-        }, 60L, TimeUnit.SECONDS);
-    }
-
-    private RequestOptions.Builder getWarningHandlerOptions(String index) {
-        return RequestOptions.DEFAULT.toBuilder()
-            .setWarningsHandler(w -> w.size() > 0 && w.contains(getWatcherSystemIndexWarning(index)) == false);
-    }
-
-    private String getWatcherSystemIndexWarning(String index) {
-        return "this request accesses system indices: ["
-            + index
-            + "], but in a future major version, "
-            + "direct access to system indices will be prevented by default";
-    }
-}
diff --git a/x-pack/qa/mixed-tier-cluster/build.gradle b/x-pack/qa/mixed-tier-cluster/build.gradle
index bf05be45e18a0..79e7d6a655993 100644
--- a/x-pack/qa/mixed-tier-cluster/build.gradle
+++ b/x-pack/qa/mixed-tier-cluster/build.gradle
@@ -10,7 +10,7 @@ dependencies {
 }
 
 // Only run tests for 7.9+, since the node.roles setting was introduced in 7.9.0
-BuildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.9.0") &&
+buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.9.0") &&
   v != VersionProperties.getElasticsearchVersion()) { bwcVersion, baseName ->
 
   def baseCluster = testClusters.register(baseName) {
@@ -54,5 +54,5 @@ tasks.withType(Test).configureEach {
   classpath = sourceSets.javaRestTest.runtimeClasspath
   testClassesDirs = sourceSets.javaRestTest.output.classesDirs
   // Security is explicitly disabled, do not run tests in FIPS mode
-  BuildParams.withFipsEnabledOnly(it)
+  buildParams.withFipsEnabledOnly(it)
 }
diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle
index 6d41c4eddf31c..9c0648abca21b 100644
--- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle
+++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle
@@ -13,7 +13,7 @@ restResources {
 }
 
 // randomise between sniff and proxy modes
-boolean proxyMode = BuildParams.random.nextBoolean()
+boolean proxyMode = buildParams.random.nextBoolean()
 
 def fulfillingCluster = testClusters.register('fulfilling-cluster') {
   setting 'xpack.security.enabled', 'true'
diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-full-license/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-full-license/build.gradle
index 69c0e8b20c2c4..ca79bb7ec3825 100644
--- a/x-pack/qa/multi-cluster-search-security/legacy-with-full-license/build.gradle
+++ b/x-pack/qa/multi-cluster-search-security/legacy-with-full-license/build.gradle
@@ -13,7 +13,7 @@ restResources {
 }
 
 // randomise between sniff and proxy modes
-boolean proxyMode = BuildParams.random.nextBoolean()
+boolean proxyMode = buildParams.random.nextBoolean()
 
 def fulfillingCluster = testClusters.register('fulfilling-cluster') {
   setting 'xpack.security.enabled', 'true'
diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/build.gradle
index 1164aa240ee22..b9f8369763476 100644
--- a/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/build.gradle
+++ b/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/build.gradle
@@ -23,7 +23,7 @@ tasks.register("copyCerts", Sync) {
 }
 
 // randomise between sniff and proxy modes
-boolean proxyMode = BuildParams.random.nextBoolean()
+boolean proxyMode = buildParams.random.nextBoolean()
 
 def fulfillingCluster = testClusters.register('fulfilling-cluster') {
   setting 'xpack.security.enabled', 'true'
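The three multi-cluster build scripts above all randomise proxyMode the same way. A sketch of how such a flag typically feeds the cluster configuration, assuming buildParams.random is seeded from the build-wide test seed so the choice is reproducible for a given build (the cluster and setting names here are illustrative, not these scripts' real wiring):

    boolean proxyMode = buildParams.random.nextBoolean()

    def queryingCluster = testClusters.register('querying-cluster') {
        // Pick one remote-connection strategy per build, so both modes get
        // exercised across CI runs without doubling the test matrix.
        setting 'cluster.remote.my_remote_cluster.mode', proxyMode ? 'proxy' : 'sniff'
    }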
diff --git a/x-pack/qa/oidc-op-tests/build.gradle b/x-pack/qa/oidc-op-tests/build.gradle
index 8f46613d5d9f0..b53539b224861 100644
--- a/x-pack/qa/oidc-op-tests/build.gradle
+++ b/x-pack/qa/oidc-op-tests/build.gradle
@@ -11,5 +11,5 @@ dependencies {
 tasks.named('javaRestTest') {
   usesDefaultDistribution()
   // test suite uses jks which is not supported in fips mode
-  BuildParams.withFipsEnabledOnly(it)
+  buildParams.withFipsEnabledOnly(it)
 }
diff --git a/x-pack/qa/repository-old-versions/build.gradle b/x-pack/qa/repository-old-versions/build.gradle
index 1abf6662a1b8b..78cfc0f688e4a 100644
--- a/x-pack/qa/repository-old-versions/build.gradle
+++ b/x-pack/qa/repository-old-versions/build.gradle
@@ -98,7 +98,7 @@ if (OS.current() == OS.WINDOWS) {
 
     TaskProvider fixture = tasks.register("oldES${versionNoDots}Fixture", AntFixture) {
       dependsOn project.configurations.oldesFixture, jdks.legacy, config
-      executable = "${BuildParams.runtimeJavaHome}/bin/java"
+      executable = "${buildParams.runtimeJavaHome.get()}/bin/java"
       env 'CLASSPATH', "${-> project.configurations.oldesFixture.asPath}"
       // old versions of Elasticsearch need JAVA_HOME
       env 'JAVA_HOME', jdks.legacy.javaHomePath
diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle
index a7ea1695c477a..09b3b7db7c917 100644
--- a/x-pack/qa/rolling-upgrade-basic/build.gradle
+++ b/x-pack/qa/rolling-upgrade-basic/build.gradle
@@ -9,7 +9,7 @@ dependencies {
   testImplementation project(':x-pack:qa')
 }
 
-BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
+buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
   def baseCluster = testClusters.register(baseName) {
     testDistribution = "DEFAULT"
     versions = [bwcVersion.toString(), project.version]
@@ -78,5 +78,5 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
 
 // Security is explicitly disabled, do not run tests in FIPS mode
 tasks.withType(Test).configureEach {
-  enabled = BuildParams.inFipsJvm == false
-}
\ No newline at end of file
+  enabled = buildParams.inFipsJvm == false
+}
diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle
index 969ba23e19254..0d1cfbd5ff022 100644
--- a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle
+++ b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle
@@ -9,7 +9,7 @@ dependencies {
   testImplementation project(':x-pack:qa')
 }
 
-BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
+buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
 
   def baseLeaderCluster = testClusters.register("${baseName}-leader") {
     numberOfNodes = 3
@@ -92,5 +92,5 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
 
 // Security is explicitly disabled, do not run tests in FIPS mode
 tasks.withType(Test).configureEach {
-  BuildParams.withFipsEnabledOnly(it)
+  buildParams.withFipsEnabledOnly(it)
 }
diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle
index 271aadfe4b388..60fb55e9a2593 100644
--- a/x-pack/qa/rolling-upgrade/build.gradle
+++ b/x-pack/qa/rolling-upgrade/build.gradle
@@ -31,7 +31,7 @@ tasks.register("copyTestNodeKeyMaterial", Copy) {
   into outputDir
 }
 
-BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
+buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
   String oldVersion = bwcVersion.toString()
 
   // SearchableSnapshotsRollingUpgradeIT uses a specific repository to not interfere with other tests
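One change above is not a pure rename: executable went from "${BuildParams.runtimeJavaHome}/bin/java" to "${buildParams.runtimeJavaHome.get()}/bin/java", which suggests runtimeJavaHome is now a Provider rather than a plain path. If eager resolution at configuration time were a concern, the lazy-GString idiom already used on the CLASSPATH line in the same block would defer it; a sketch under that Provider assumption:

    // "${-> ...}" delays evaluation until the string is actually consumed,
    // instead of calling get() while the build is being configured.
    executable = "${-> buildParams.runtimeJavaHome.get()}/bin/java"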
diff --git a/x-pack/qa/third-party/jira/build.gradle b/x-pack/qa/third-party/jira/build.gradle
index b7268af807535..626693a8f295f 100644
--- a/x-pack/qa/third-party/jira/build.gradle
+++ b/x-pack/qa/third-party/jira/build.gradle
@@ -55,7 +55,7 @@ if (!jiraUrl && !jiraUser && !jiraPassword && !jiraProject) {
   tasks.named("yamlRestTest").configure { finalizedBy "cleanJira" }
 }
 
-if (BuildParams.inFipsJvm){
+if (buildParams.inFipsJvm){
   // Test clusters run with security disabled
   tasks.named("yamlRestTest").configure{ enabled = false }
 }
diff --git a/x-pack/qa/third-party/pagerduty/build.gradle b/x-pack/qa/third-party/pagerduty/build.gradle
index 4b5a0bbeeeb4a..86ed67ccbb2d6 100644
--- a/x-pack/qa/third-party/pagerduty/build.gradle
+++ b/x-pack/qa/third-party/pagerduty/build.gradle
@@ -28,7 +28,7 @@ if (!pagerDutyServiceKey) {
   }
 }
 
-if (BuildParams.inFipsJvm){
+if (buildParams.inFipsJvm){
   // Test clusters run with security disabled
   tasks.named("yamlRestTest").configure{enabled = false }
 }
diff --git a/x-pack/qa/third-party/slack/build.gradle b/x-pack/qa/third-party/slack/build.gradle
index b2b0478da0471..ff501a7c99c9b 100644
--- a/x-pack/qa/third-party/slack/build.gradle
+++ b/x-pack/qa/third-party/slack/build.gradle
@@ -28,7 +28,7 @@ if (!slackUrl) {
   }
 }
 
-if (BuildParams.inFipsJvm){
+if (buildParams.inFipsJvm){
   // Test clusters run with security disabled
   tasks.named("yamlRestTest").configure{enabled = false }
 }
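The jira, pagerduty and slack QA projects share the same two guards: skip the suite when the third-party credentials are not configured, and skip it on FIPS JVMs because these test clusters run with security disabled. Condensed into one hedged sketch (the property name is illustrative, not the build's real key):

    // Disable the suite when no external endpoint is configured, or when
    // running on a FIPS JVM where the security-disabled cluster cannot run.
    def slackUrl = providers.systemProperty('tests.slack.url').getOrNull()
    if (slackUrl == null || buildParams.inFipsJvm) {
        tasks.named('yamlRestTest').configure { enabled = false }
    }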