diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index 6c8b8edfcbac..4bc72aec2097 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -56,7 +56,7 @@ steps:
timeout_in_minutes: 300
matrix:
setup:
- BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"]
+ BWC_VERSION: ["8.15.6", "8.16.2", "8.17.0", "8.18.0", "9.0.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index 69d11ef1dabb..3d6095d0b9e6 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -448,7 +448,7 @@ steps:
setup:
ES_RUNTIME_JAVA:
- openjdk21
- BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"]
+ BWC_VERSION: ["8.15.6", "8.16.2", "8.17.0", "8.18.0", "9.0.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
@@ -490,7 +490,7 @@ steps:
ES_RUNTIME_JAVA:
- openjdk21
- openjdk23
- BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"]
+ BWC_VERSION: ["8.15.6", "8.16.2", "8.17.0", "8.18.0", "9.0.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index 5514fc376a28..f92881da7fea 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,4 +1,5 @@
BWC_VERSION:
+ - "8.15.6"
- "8.16.2"
- "8.17.0"
- "8.18.0"
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy
index 6d080e1c8076..bb100b6b2388 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy
@@ -9,9 +9,10 @@
package org.elasticsearch.gradle.internal
+import spock.lang.Unroll
+
import org.elasticsearch.gradle.fixtures.AbstractGitAwareGradleFuncTest
import org.gradle.testkit.runner.TaskOutcome
-import spock.lang.Unroll
class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleFuncTest {
@@ -23,8 +24,10 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
apply plugin: 'elasticsearch.internal-distribution-bwc-setup'
"""
execute("git branch origin/8.x", file("cloned"))
+ execute("git branch origin/8.3", file("cloned"))
+ execute("git branch origin/8.2", file("cloned"))
+ execute("git branch origin/8.1", file("cloned"))
execute("git branch origin/7.16", file("cloned"))
- execute("git branch origin/7.15", file("cloned"))
}
def "builds distribution from branches via archives extractedAssemble"() {
@@ -48,10 +51,11 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
assertOutputContains(result.output, "[$bwcDistVersion] > Task :distribution:archives:darwin-tar:${expectedAssembleTaskName}")
where:
- bwcDistVersion | bwcProject | expectedAssembleTaskName
- "8.0.0" | "minor" | "extractedAssemble"
- "7.16.0" | "staged" | "extractedAssemble"
- "7.15.2" | "bugfix" | "extractedAssemble"
+ bwcDistVersion | bwcProject | expectedAssembleTaskName
+ "8.4.0" | "minor" | "extractedAssemble"
+ "8.3.0" | "staged" | "extractedAssemble"
+ "8.2.1" | "bugfix" | "extractedAssemble"
+ "8.1.3" | "bugfix2" | "extractedAssemble"
}
@Unroll
@@ -70,8 +74,8 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
where:
bwcDistVersion | platform
- "8.0.0" | "darwin"
- "8.0.0" | "linux"
+ "8.4.0" | "darwin"
+ "8.4.0" | "linux"
}
def "bwc expanded distribution folder can be resolved as bwc project artifact"() {
@@ -107,11 +111,11 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
result.task(":resolveExpandedDistribution").outcome == TaskOutcome.SUCCESS
result.task(":distribution:bwc:minor:buildBwcDarwinTar").outcome == TaskOutcome.SUCCESS
and: "assemble task triggered"
- result.output.contains("[8.0.0] > Task :distribution:archives:darwin-tar:extractedAssemble")
+ result.output.contains("[8.4.0] > Task :distribution:archives:darwin-tar:extractedAssemble")
result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-8.x/" +
"distribution/archives/darwin-tar/build/install")
result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-8.x/" +
- "distribution/archives/darwin-tar/build/install/elasticsearch-8.0.0-SNAPSHOT")
+ "distribution/archives/darwin-tar/build/install/elasticsearch-8.4.0-SNAPSHOT")
}
}
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy
index eb6185e5aed5..fc5d432a9ef9 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy
@@ -57,7 +57,7 @@ class InternalDistributionDownloadPluginFuncTest extends AbstractGradleFuncTest
elasticsearch_distributions {
test_distro {
- version = "8.0.0"
+ version = "8.4.0"
type = "archive"
platform = "linux"
architecture = Architecture.current();
@@ -87,7 +87,7 @@ class InternalDistributionDownloadPluginFuncTest extends AbstractGradleFuncTest
elasticsearch_distributions {
test_distro {
- version = "8.0.0"
+ version = "8.4.0"
type = "archive"
platform = "linux"
architecture = Architecture.current();
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy
index e3efe3d7ffbf..15b057a05e03 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy
@@ -40,7 +40,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
given:
internalBuild()
- subProject(":distribution:bwc:staged") << """
+ subProject(":distribution:bwc:minor") << """
configurations { checkout }
artifacts {
checkout(new File(projectDir, "checkoutDir"))
@@ -61,11 +61,11 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
result.task(transformTask).outcome == TaskOutcome.NO_SOURCE
}
- def "yamlRestCompatTest executes and copies api and transforms tests from :bwc:staged"() {
+ def "yamlRestCompatTest executes and copies api and transforms tests from :bwc:minor"() {
given:
internalBuild()
- subProject(":distribution:bwc:staged") << """
+ subProject(":distribution:bwc:minor") << """
configurations { checkout }
artifacts {
checkout(new File(projectDir, "checkoutDir"))
@@ -98,8 +98,8 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
String api = "foo.json"
String test = "10_basic.yml"
//add the compatible test and api files, these are the prior version's normal yaml rest tests
- file("distribution/bwc/staged/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << ""
- file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << ""
+ file("distribution/bwc/minor/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << ""
+ file("distribution/bwc/minor/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << ""
when:
def result = gradleRunner("yamlRestCompatTest").build()
@@ -145,7 +145,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
given:
internalBuild()
withVersionCatalogue()
- subProject(":distribution:bwc:staged") << """
+ subProject(":distribution:bwc:minor") << """
configurations { checkout }
artifacts {
checkout(new File(projectDir, "checkoutDir"))
@@ -186,7 +186,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
given:
internalBuild()
- subProject(":distribution:bwc:staged") << """
+ subProject(":distribution:bwc:minor") << """
configurations { checkout }
artifacts {
checkout(new File(projectDir, "checkoutDir"))
@@ -230,7 +230,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
setupRestResources([], [])
- file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """
+ file("distribution/bwc/minor/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """
"one":
- do:
do_.some.key_to_replace:
diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix2/build.gradle b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix2/build.gradle
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/maintenance/build.gradle b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/maintenance/build.gradle
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle
index 8c321294b585..e931537fcd6e 100644
--- a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle
+++ b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle
@@ -10,9 +10,11 @@
rootProject.name = "root"
include ":distribution:bwc:bugfix"
+include ":distribution:bwc:bugfix2"
include ":distribution:bwc:minor"
include ":distribution:bwc:major"
include ":distribution:bwc:staged"
+include ":distribution:bwc:maintenance"
include ":distribution:archives:darwin-tar"
include ":distribution:archives:oss-darwin-tar"
include ":distribution:archives:darwin-aarch64-tar"
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
index 93c2623a23d3..37b28389ad97 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
@@ -21,14 +21,15 @@
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
-import java.util.TreeSet;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import static java.util.Collections.reverseOrder;
import static java.util.Collections.unmodifiableList;
+import static java.util.Comparator.comparing;
/**
* A container for elasticsearch supported version information used in BWC testing.
@@ -73,11 +74,11 @@ public class BwcVersions implements Serializable {
private final transient List versions;
private final Map unreleased;
- public BwcVersions(List versionLines) {
- this(versionLines, Version.fromString(VersionProperties.getElasticsearch()));
+ public BwcVersions(List versionLines, List developmentBranches) {
+ this(versionLines, Version.fromString(VersionProperties.getElasticsearch()), developmentBranches);
}
- public BwcVersions(Version currentVersionProperty, List allVersions) {
+ public BwcVersions(Version currentVersionProperty, List allVersions, List developmentBranches) {
if (allVersions.isEmpty()) {
throw new IllegalArgumentException("Could not parse any versions");
}
@@ -86,12 +87,12 @@ public BwcVersions(Version currentVersionProperty, List allVersions) {
this.currentVersion = allVersions.get(allVersions.size() - 1);
assertCurrentVersionMatchesParsed(currentVersionProperty);
- this.unreleased = computeUnreleased();
+ this.unreleased = computeUnreleased(developmentBranches);
}
// Visible for testing
- BwcVersions(List versionLines, Version currentVersionProperty) {
- this(currentVersionProperty, parseVersionLines(versionLines));
+ BwcVersions(List versionLines, Version currentVersionProperty, List developmentBranches) {
+ this(currentVersionProperty, parseVersionLines(versionLines), developmentBranches);
}
private static List parseVersionLines(List versionLines) {
@@ -126,58 +127,77 @@ public void forPreviousUnreleased(Consumer consumer) {
getUnreleased().stream().filter(version -> version.equals(currentVersion) == false).map(unreleased::get).forEach(consumer);
}
- private String getBranchFor(Version version) {
- if (version.equals(currentVersion)) {
- // Just assume the current branch is 'main'. It's actually not important, we never check out the current branch.
- return "main";
- } else {
+ private String getBranchFor(Version version, List developmentBranches) {
+ // If the current version matches a specific feature freeze branch, use that
+ if (developmentBranches.contains(version.getMajor() + "." + version.getMinor())) {
return version.getMajor() + "." + version.getMinor();
+ } else if (developmentBranches.contains(version.getMajor() + ".x")) { // Otherwise if an n.x branch exists and we are that major
+ return version.getMajor() + ".x";
+ } else { // otherwise we're the main branch
+ return "main";
}
}
- private Map computeUnreleased() {
- Set unreleased = new TreeSet<>();
- // The current version is being worked, is always unreleased
- unreleased.add(currentVersion);
- // Recurse for all unreleased versions starting from the current version
- addUnreleased(unreleased, currentVersion, 0);
+ private Map computeUnreleased(List developmentBranches) {
+ Map result = new TreeMap<>();
- // Grab the latest version from the previous major if necessary as well, this is going to be a maintenance release
- Version maintenance = versions.stream()
- .filter(v -> v.getMajor() == currentVersion.getMajor() - 1)
- .max(Comparator.naturalOrder())
- .orElseThrow();
- // This is considered the maintenance release only if we haven't yet encountered it
- boolean hasMaintenanceRelease = unreleased.add(maintenance);
+ // The current version is always in development
+ String currentBranch = getBranchFor(currentVersion, developmentBranches);
+ result.put(currentVersion, new UnreleasedVersionInfo(currentVersion, currentBranch, ":distribution"));
+
+ // Check for an n.x branch as well
+ if (currentBranch.equals("main") && developmentBranches.stream().anyMatch(s -> s.endsWith(".x"))) {
+ // This should correspond to the latest new minor
+ Version version = versions.stream()
+ .sorted(Comparator.reverseOrder())
+ .filter(v -> v.getMajor() == (currentVersion.getMajor() - 1) && v.getRevision() == 0)
+ .findFirst()
+ .orElseThrow(() -> new IllegalStateException("Unable to determine development version for branch"));
+ String branch = getBranchFor(version, developmentBranches);
+ assert branch.equals(currentVersion.getMajor() - 1 + ".x") : "Expected branch does not match development branch";
+
+ result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:minor"));
+ }
- List unreleasedList = unreleased.stream().sorted(Comparator.reverseOrder()).toList();
- Map result = new TreeMap<>();
- boolean newMinor = false;
- for (int i = 0; i < unreleasedList.size(); i++) {
- Version esVersion = unreleasedList.get(i);
- // This is either a new minor or staged release
- if (currentVersion.equals(esVersion)) {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution"));
- } else if (esVersion.getRevision() == 0) {
- // If there are two upcoming unreleased minors then this one is the new minor
- if (newMinor == false && unreleasedList.get(i + 1).getRevision() == 0) {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor"));
- newMinor = true;
- } else if (newMinor == false
- && unreleasedList.stream().filter(v -> v.getMajor() == esVersion.getMajor() && v.getRevision() == 0).count() == 1) {
- // This is the only unreleased new minor which means we've not yet staged it for release
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor"));
- newMinor = true;
- } else {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:staged"));
- }
- } else {
- // If this is the oldest unreleased version and we have a maintenance release
- if (i == unreleasedList.size() - 1 && hasMaintenanceRelease) {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:maintenance"));
- } else {
- result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:bugfix"));
- }
+ // Now handle all the feature freeze branches
+ List featureFreezeBranches = developmentBranches.stream()
+ .filter(b -> Pattern.matches("[0-9]+\\.[0-9]+", b))
+ .sorted(reverseOrder(comparing(s -> Version.fromString(s, Version.Mode.RELAXED))))
+ .toList();
+
+ boolean existingBugfix = false;
+ for (int i = 0; i < featureFreezeBranches.size(); i++) {
+ String branch = featureFreezeBranches.get(i);
+ Version version = versions.stream()
+ .sorted(Comparator.reverseOrder())
+ .filter(v -> v.toString().startsWith(branch))
+ .findFirst()
+ .orElse(null);
+
+ // If we don't know about this version we can ignore it
+ if (version == null) {
+ continue;
+ }
+
+ // If this is the current version we can ignore as we've already handled it
+ if (version.equals(currentVersion)) {
+ continue;
+ }
+
+ // We only maintain compatibility back one major so ignore anything older
+ if (currentVersion.getMajor() - version.getMajor() > 1) {
+ continue;
+ }
+
+ // This is the maintenance version
+ if (i == featureFreezeBranches.size() - 1) {
+ result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:maintenance"));
+ } else if (version.getRevision() == 0) { // This is the next staged minor
+ result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:staged"));
+ } else { // This is a bugfix
+ String project = existingBugfix ? "bugfix2" : "bugfix";
+ result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:" + project));
+ existingBugfix = true;
}
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
index 0535026b2594..27d2a66feb20 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
@@ -8,6 +8,9 @@
*/
package org.elasticsearch.gradle.internal.info;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
import org.apache.commons.io.IOUtils;
import org.elasticsearch.gradle.VersionProperties;
import org.elasticsearch.gradle.internal.BwcVersions;
@@ -44,11 +47,13 @@
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
+import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
+import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Random;
@@ -68,6 +73,7 @@ public class GlobalBuildInfoPlugin implements Plugin {
private final JavaInstallationRegistry javaInstallationRegistry;
private final JvmMetadataDetector metadataDetector;
private final ProviderFactory providers;
+ private final ObjectMapper objectMapper;
private JavaToolchainService toolChainService;
private Project project;
@@ -82,7 +88,7 @@ public GlobalBuildInfoPlugin(
this.javaInstallationRegistry = javaInstallationRegistry;
this.metadataDetector = new ErrorTraceMetadataDetector(metadataDetector);
this.providers = providers;
-
+ this.objectMapper = new ObjectMapper();
}
@Override
@@ -190,12 +196,27 @@ private BwcVersions resolveBwcVersions() {
);
try (var is = new FileInputStream(versionsFilePath)) {
List versionLines = IOUtils.readLines(is, "UTF-8");
- return new BwcVersions(versionLines);
+ return new BwcVersions(versionLines, getDevelopmentBranches());
} catch (IOException e) {
throw new IllegalStateException("Unable to resolve to resolve bwc versions from versionsFile.", e);
}
}
+ private List getDevelopmentBranches() {
+ List branches = new ArrayList<>();
+ File branchesFile = new File(Util.locateElasticsearchWorkspace(project.getGradle()), "branches.json");
+ try (InputStream is = new FileInputStream(branchesFile)) {
+ JsonNode json = objectMapper.readTree(is);
+ for (JsonNode node : json.get("branches")) {
+ branches.add(node.get("branch").asText());
+ }
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+
+ return branches;
+ }
+
private void logGlobalBuildInfo(BuildParameterExtension buildParams) {
final String osName = System.getProperty("os.name");
final String osVersion = System.getProperty("os.version");
diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy
index 9c7d20d84a67..4d033564a42b 100644
--- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy
+++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy
@@ -17,8 +17,9 @@ import org.elasticsearch.gradle.internal.BwcVersions.UnreleasedVersionInfo
class BwcVersionsSpec extends Specification {
List versionLines = []
- def "current version is next minor with next major and last minor both staged"() {
+ def "current version is next major"() {
given:
+ addVersion('7.17.10', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -29,25 +30,25 @@ class BwcVersionsSpec extends Specification {
addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
addVersion('9.0.0', '10.0.0')
- addVersion('9.1.0', '10.1.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.1.0'))
+ def bwc = new BwcVersions(versionLines, v('9.0.0'), ['main', '8.x', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix2'),
(v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
- (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'),
- (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), '9.x', ':distribution:bwc:minor'),
- (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution')
+ (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'),
+ (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('9.0.0'), v('9.1.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0'), v('9.1.0')]
+ bwc.wireCompatible == [v('8.17.0'), v('9.0.0')]
+ bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0')]
}
- def "current is next minor with upcoming minor staged"() {
+ def "current version is next major with staged minor"() {
given:
+ addVersion('7.17.10', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -57,53 +58,106 @@ class BwcVersionsSpec extends Specification {
addVersion('8.16.0', '9.10.0')
addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
- addVersion('8.17.1', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
addVersion('9.0.0', '10.0.0')
- addVersion('9.1.0', '10.1.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.1.0'))
+ def bwc = new BwcVersions(versionLines, v('9.0.0'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'),
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix2'),
+ (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
+ (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'),
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.x', ':distribution:bwc:minor'),
+ (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+ ]
+ bwc.wireCompatible == [v('8.18.0'), v('9.0.0')]
+ bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0')]
+ }
+
+ def "current version is first new minor in major series"() {
+ given:
+ addVersion('7.17.10', '8.9.0')
+ addVersion('8.16.0', '9.10.0')
+ addVersion('8.16.1', '9.10.0')
+ addVersion('8.17.0', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
+ addVersion('9.0.0', '10.0.0')
+ addVersion('9.1.0', '10.0.0')
+
+ when:
+ def bwc = new BwcVersions(versionLines, v('9.1.0'), ['main', '9.0', '8.18'])
+ def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
+
+ then:
+ unreleased == [
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.18', ':distribution:bwc:maintenance'),
(v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), '9.0', ':distribution:bwc:staged'),
- (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution')
+ (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.1.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.1.0')]
+ bwc.wireCompatible == [v('8.18.0'), v('9.0.0'), v('9.1.0')]
+ bwc.indexCompatible == [v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0'), v('9.1.0')]
}
- def "current version is staged major"() {
+ def "current version is new minor with single bugfix"() {
given:
- addVersion('8.14.0', '9.9.0')
- addVersion('8.14.1', '9.9.0')
- addVersion('8.14.2', '9.9.0')
- addVersion('8.15.0', '9.9.0')
- addVersion('8.15.1', '9.9.0')
- addVersion('8.15.2', '9.9.0')
+ addVersion('7.17.10', '8.9.0')
addVersion('8.16.0', '9.10.0')
addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
- addVersion('8.17.1', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
addVersion('9.0.0', '10.0.0')
+ addVersion('9.0.1', '10.0.0')
+ addVersion('9.1.0', '10.0.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.0.0'))
+ def bwc = new BwcVersions(versionLines, v('9.1.0'), ['main', '9.0', '8.18'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'),
- (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.18', ':distribution:bwc:maintenance'),
+ (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'),
+ (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0')]
+ bwc.wireCompatible == [v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
+ bwc.indexCompatible == [v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
}
- def "current version is major with unreleased next minor"() {
+ def "current version is new minor with single bugfix and staged minor"() {
given:
+ addVersion('7.17.10', '8.9.0')
+ addVersion('8.16.0', '9.10.0')
+ addVersion('8.16.1', '9.10.0')
+ addVersion('8.17.0', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
+ addVersion('9.0.0', '10.0.0')
+ addVersion('9.0.1', '10.0.0')
+ addVersion('9.1.0', '10.0.0')
+ addVersion('9.2.0', '10.0.0')
+
+ when:
+ def bwc = new BwcVersions(versionLines, v('9.2.0'), ['main', '9.1', '9.0', '8.18'])
+ def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
+
+ then:
+ unreleased == [
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.18', ':distribution:bwc:maintenance'),
+ (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'),
+ (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), '9.1', ':distribution:bwc:staged'),
+ (v('9.2.0')): new UnreleasedVersionInfo(v('9.2.0'), 'main', ':distribution'),
+ ]
+ bwc.wireCompatible == [v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0'), v('9.2.0')]
+ bwc.indexCompatible == [v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0'), v('9.2.0')]
+ }
+
+ def "current version is next minor"() {
+ given:
+ addVersion('7.16.3', '8.9.0')
+ addVersion('7.17.0', '8.9.0')
+ addVersion('7.17.1', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -113,24 +167,29 @@ class BwcVersionsSpec extends Specification {
addVersion('8.16.0', '9.10.0')
addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
- addVersion('9.0.0', '10.0.0')
+ addVersion('8.17.1', '9.10.0')
+ addVersion('8.18.0', '9.10.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.0.0'))
+ def bwc = new BwcVersions(versionLines, v('8.18.0'), ['main', '8.x', '8.17', '8.16', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
- (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'),
- (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+ (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+ (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix2'),
+ (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'),
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.x', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('9.0.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0')]
+ bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('8.18.0')]
+ bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('8.18.0')]
}
- def "current version is major with staged next minor"() {
+ def "current version is new minor with staged minor"() {
given:
+ addVersion('7.16.3', '8.9.0')
+ addVersion('7.17.0', '8.9.0')
+ addVersion('7.17.1', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -138,26 +197,31 @@ class BwcVersionsSpec extends Specification {
addVersion('8.15.1', '9.9.0')
addVersion('8.15.2', '9.9.0')
addVersion('8.16.0', '9.10.0')
+ addVersion('8.16.1', '9.10.0')
addVersion('8.17.0', '9.10.0')
- addVersion('9.0.0', '10.0.0')
+ addVersion('8.18.0', '9.10.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.0.0'))
+ def bwc = new BwcVersions(versionLines, v('8.18.0'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix'),
- (v('8.16.0')): new UnreleasedVersionInfo(v('8.16.0'), '8.16', ':distribution:bwc:staged'),
- (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'),
- (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+ (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix2'),
+ (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
+ (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'),
+ (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.x', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('9.0.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.17.0'), v('9.0.0')]
+ bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0')]
+ bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0')]
}
- def "current version is next bugfix"() {
+ def "current version is first bugfix"() {
given:
+ addVersion('7.16.3', '8.9.0')
+ addVersion('7.17.0', '8.9.0')
+ addVersion('7.17.1', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
@@ -166,52 +230,44 @@ class BwcVersionsSpec extends Specification {
addVersion('8.15.2', '9.9.0')
addVersion('8.16.0', '9.10.0')
addVersion('8.16.1', '9.10.0')
- addVersion('8.17.0', '9.10.0')
- addVersion('8.17.1', '9.10.0')
- addVersion('9.0.0', '10.0.0')
- addVersion('9.0.1', '10.0.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.0.1'))
+ def bwc = new BwcVersions(versionLines, v('8.16.1'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:maintenance'),
- (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), 'main', ':distribution'),
+ (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix'),
+ (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1')]
+ bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1')]
+ bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1')]
}
- def "current version is next minor with no staged releases"() {
+ def "current version is second bugfix"() {
given:
+ addVersion('7.16.3', '8.9.0')
+ addVersion('7.17.0', '8.9.0')
+ addVersion('7.17.1', '8.9.0')
addVersion('8.14.0', '9.9.0')
addVersion('8.14.1', '9.9.0')
addVersion('8.14.2', '9.9.0')
addVersion('8.15.0', '9.9.0')
addVersion('8.15.1', '9.9.0')
addVersion('8.15.2', '9.9.0')
- addVersion('8.16.0', '9.10.0')
- addVersion('8.16.1', '9.10.0')
- addVersion('8.17.0', '9.10.0')
- addVersion('8.17.1', '9.10.0')
- addVersion('9.0.0', '10.0.0')
- addVersion('9.0.1', '10.0.0')
- addVersion('9.1.0', '10.1.0')
when:
- def bwc = new BwcVersions(versionLines, v('9.1.0'))
+ def bwc = new BwcVersions(versionLines, v('8.15.2'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
then:
unreleased == [
- (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:maintenance'),
- (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'),
- (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution')
+ (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+ (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution'),
]
- bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
- bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
+ bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2')]
+ bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2')]
}
private void addVersion(String elasticsearch, String lucene) {
diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java
index 639dec280ae9..7512fa20814c 100644
--- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java
+++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java
@@ -16,6 +16,7 @@
import java.io.File;
import java.util.Arrays;
+import java.util.List;
public class AbstractDistributionDownloadPluginTests {
protected static Project rootProject;
@@ -28,22 +29,27 @@ public class AbstractDistributionDownloadPluginTests {
protected static final Version BWC_STAGED_VERSION = Version.fromString("1.0.0");
protected static final Version BWC_BUGFIX_VERSION = Version.fromString("1.0.1");
protected static final Version BWC_MAINTENANCE_VERSION = Version.fromString("0.90.1");
+ protected static final List DEVELOPMENT_BRANCHES = Arrays.asList("main", "1.1", "1.0", "0.90");
protected static final BwcVersions BWC_MINOR = new BwcVersions(
BWC_MAJOR_VERSION,
- Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)
+ Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
+ DEVELOPMENT_BRANCHES
);
protected static final BwcVersions BWC_STAGED = new BwcVersions(
BWC_MAJOR_VERSION,
- Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)
+ Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
+ DEVELOPMENT_BRANCHES
);
protected static final BwcVersions BWC_BUGFIX = new BwcVersions(
BWC_MAJOR_VERSION,
- Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)
+ Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
+ DEVELOPMENT_BRANCHES
);
protected static final BwcVersions BWC_MAINTENANCE = new BwcVersions(
BWC_MINOR_VERSION,
- Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_BUGFIX_VERSION, BWC_MINOR_VERSION)
+ Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_BUGFIX_VERSION, BWC_MINOR_VERSION),
+ DEVELOPMENT_BRANCHES
);
protected static String projectName(String base, boolean bundledJdk) {
diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
index f3f8e4703eba..07214b5fbf84 100644
--- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
+++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
@@ -156,12 +156,12 @@ abstract class AbstractGradleFuncTest extends Specification {
File internalBuild(
List extraPlugins = [],
- String bugfix = "7.15.2",
- String bugfixLucene = "8.9.0",
- String staged = "7.16.0",
- String stagedLucene = "8.10.0",
- String minor = "8.0.0",
- String minorLucene = "9.0.0"
+ String maintenance = "7.16.10",
+ String bugfix2 = "8.1.3",
+ String bugfix = "8.2.1",
+ String staged = "8.3.0",
+ String minor = "8.4.0",
+ String current = "9.0.0"
) {
buildFile << """plugins {
id 'elasticsearch.global-build-info'
@@ -172,15 +172,17 @@ abstract class AbstractGradleFuncTest extends Specification {
import org.elasticsearch.gradle.internal.BwcVersions
import org.elasticsearch.gradle.Version
- Version currentVersion = Version.fromString("8.1.0")
+ Version currentVersion = Version.fromString("${current}")
def versionList = [
+ Version.fromString("$maintenance"),
+ Version.fromString("$bugfix2"),
Version.fromString("$bugfix"),
Version.fromString("$staged"),
Version.fromString("$minor"),
currentVersion
]
- BwcVersions versions = new BwcVersions(currentVersion, versionList)
+ BwcVersions versions = new BwcVersions(currentVersion, versionList, ['main', '8.x', '8.3', '8.2', '8.1', '7.16'])
buildParams.getBwcVersionsProperty().set(versions)
"""
}
diff --git a/distribution/bwc/bugfix2/build.gradle b/distribution/bwc/bugfix2/build.gradle
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/changelog/114445.yaml b/docs/changelog/114445.yaml
new file mode 100644
index 000000000000..afbc080d1e0b
--- /dev/null
+++ b/docs/changelog/114445.yaml
@@ -0,0 +1,6 @@
+pr: 114445
+summary: Wrap jackson exception on malformed json string
+area: Infra/Core
+type: bug
+issues:
+ - 114142
diff --git a/docs/changelog/117792.yaml b/docs/changelog/117792.yaml
new file mode 100644
index 000000000000..2d7ddda1ace4
--- /dev/null
+++ b/docs/changelog/117792.yaml
@@ -0,0 +1,6 @@
+pr: 117792
+summary: Address mapping and compute engine runtime field issues
+area: Mapping
+type: bug
+issues:
+ - 117644
diff --git a/docs/changelog/117914.yaml b/docs/changelog/117914.yaml
new file mode 100644
index 000000000000..da58ed7bb04b
--- /dev/null
+++ b/docs/changelog/117914.yaml
@@ -0,0 +1,5 @@
+pr: 117914
+summary: Fix for propagating filters from compound to inner retrievers
+area: Ranking
+type: bug
+issues: []
diff --git a/docs/changelog/117963.yaml b/docs/changelog/117963.yaml
new file mode 100644
index 000000000000..4a50dc175786
--- /dev/null
+++ b/docs/changelog/117963.yaml
@@ -0,0 +1,5 @@
+pr: 117963
+summary: '`SearchServiceTests.testParseSourceValidation` failure'
+area: Search
+type: bug
+issues: []
diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc
index ed93c290b6ad..4f82889f562d 100644
--- a/docs/reference/inference/put-inference.asciidoc
+++ b/docs/reference/inference/put-inference.asciidoc
@@ -10,7 +10,6 @@ Creates an {infer} endpoint to perform an {infer} task.
* For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>.
====
-
[discrete]
[[put-inference-api-request]]
==== {api-request-title}
@@ -47,6 +46,14 @@ Refer to the service list in the <> API. In the response, look for `"state": "fully_allocated"` and ensure the `"allocation_count"` matches the `"target_allocation_count"`.
+* Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+====
+
+
The following services are available through the {infer} API.
You can find the available task types next to the service name.
Click the links to review the configuration details of the services:
diff --git a/libs/entitlement/src/main/java/module-info.java b/libs/entitlement/src/main/java/module-info.java
index 54075ba60bbe..b8a125b98e64 100644
--- a/libs/entitlement/src/main/java/module-info.java
+++ b/libs/entitlement/src/main/java/module-info.java
@@ -17,6 +17,7 @@
requires static org.elasticsearch.entitlement.bridge; // At runtime, this will be in java.base
exports org.elasticsearch.entitlement.runtime.api;
+ exports org.elasticsearch.entitlement.runtime.policy;
exports org.elasticsearch.entitlement.instrumentation;
exports org.elasticsearch.entitlement.bootstrap to org.elasticsearch.server;
exports org.elasticsearch.entitlement.initialization to java.base;
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
index 0ffab5f93969..fb694308466c 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
@@ -18,6 +18,8 @@
import org.elasticsearch.entitlement.instrumentation.MethodKey;
import org.elasticsearch.entitlement.instrumentation.Transformer;
import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker;
+import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement;
+import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement;
import org.elasticsearch.entitlement.runtime.policy.Policy;
import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
import org.elasticsearch.entitlement.runtime.policy.PolicyParser;
@@ -86,9 +88,11 @@ private static Class> internalNameToClass(String internalName) {
private static PolicyManager createPolicyManager() throws IOException {
Map pluginPolicies = createPluginPolicies(EntitlementBootstrap.bootstrapArgs().pluginData());
- // TODO: What should the name be?
// TODO(ES-10031): Decide what goes in the elasticsearch default policy and extend it
- var serverPolicy = new Policy("server", List.of());
+ var serverPolicy = new Policy(
+ "server",
+ List.of(new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement())))
+ );
return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver());
}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
index 28a080470c04..aa63b630ed7c 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
@@ -10,7 +10,6 @@
package org.elasticsearch.entitlement.runtime.api;
import org.elasticsearch.entitlement.bridge.EntitlementChecker;
-import org.elasticsearch.entitlement.runtime.policy.FlagEntitlementType;
import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
import java.net.URL;
@@ -30,27 +29,27 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) {
@Override
public void check$java_lang_System$exit(Class> callerClass, int status) {
- policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.SYSTEM_EXIT);
+ policyManager.checkExitVM(callerClass);
}
@Override
public void check$java_net_URLClassLoader$(Class> callerClass, URL[] urls) {
- policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
+ policyManager.checkCreateClassLoader(callerClass);
}
@Override
public void check$java_net_URLClassLoader$(Class> callerClass, URL[] urls, ClassLoader parent) {
- policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
+ policyManager.checkCreateClassLoader(callerClass);
}
@Override
public void check$java_net_URLClassLoader$(Class> callerClass, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory) {
- policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
+ policyManager.checkCreateClassLoader(callerClass);
}
@Override
public void check$java_net_URLClassLoader$(Class> callerClass, String name, URL[] urls, ClassLoader parent) {
- policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
+ policyManager.checkCreateClassLoader(callerClass);
}
@Override
@@ -61,6 +60,6 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) {
ClassLoader parent,
URLStreamHandlerFactory factory
) {
- policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
+ policyManager.checkCreateClassLoader(callerClass);
}
}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java
index 708e0b87711f..138515be9ffc 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java
@@ -12,5 +12,4 @@
public class CreateClassLoaderEntitlement implements Entitlement {
@ExternalEntitlement
public CreateClassLoaderEntitlement() {}
-
}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java
similarity index 79%
rename from libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java
rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java
index d40235ee1216..c4a8fc683358 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java
@@ -9,7 +9,7 @@
package org.elasticsearch.entitlement.runtime.policy;
-public enum FlagEntitlementType {
- SYSTEM_EXIT,
- CREATE_CLASSLOADER;
-}
+/**
+ * Internal policy type (not-parseable -- not available to plugins).
+ */
+public class ExitVMEntitlement implements Entitlement {}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java
index 8df199591d3e..d0837bc09618 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java
@@ -20,6 +20,9 @@ public class FileEntitlement implements Entitlement {
public static final int READ_ACTION = 0x1;
public static final int WRITE_ACTION = 0x2;
+ public static final String READ = "read";
+ public static final String WRITE = "write";
+
private final String path;
private final int actions;
@@ -29,12 +32,12 @@ public FileEntitlement(String path, List actionsList) {
int actionsInt = 0;
for (String actionString : actionsList) {
- if ("read".equals(actionString)) {
+ if (READ.equals(actionString)) {
if ((actionsInt & READ_ACTION) == READ_ACTION) {
throw new IllegalArgumentException("file action [read] specified multiple times");
}
actionsInt |= READ_ACTION;
- } else if ("write".equals(actionString)) {
+ } else if (WRITE.equals(actionString)) {
if ((actionsInt & WRITE_ACTION) == WRITE_ACTION) {
throw new IllegalArgumentException("file action [write] specified multiple times");
}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
index b3fb5b75a1d5..a77c86d5ffd0 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
@@ -17,17 +17,45 @@
import java.lang.module.ModuleFinder;
import java.lang.module.ModuleReference;
+import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
+import java.util.IdentityHashMap;
+import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
public class PolicyManager {
private static final Logger logger = LogManager.getLogger(ElasticsearchEntitlementChecker.class);
+ static class ModuleEntitlements {
+ public static final ModuleEntitlements NONE = new ModuleEntitlements(List.of());
+ private final IdentityHashMap, List> entitlementsByType;
+
+ ModuleEntitlements(List entitlements) {
+ this.entitlementsByType = entitlements.stream()
+ .collect(Collectors.toMap(Entitlement::getClass, e -> new ArrayList<>(List.of(e)), (a, b) -> {
+ a.addAll(b);
+ return a;
+ }, IdentityHashMap::new));
+ }
+
+ public boolean hasEntitlement(Class extends Entitlement> entitlementClass) {
+ return entitlementsByType.containsKey(entitlementClass);
+ }
+
+ public Stream getEntitlements(Class entitlementClass) {
+ return entitlementsByType.get(entitlementClass).stream().map(entitlementClass::cast);
+ }
+ }
+
+ final Map moduleEntitlementsMap = new HashMap<>();
+
protected final Policy serverPolicy;
protected final Map pluginPolicies;
private final Function, String> pluginResolver;
@@ -56,27 +84,110 @@ public PolicyManager(Policy defaultPolicy, Map pluginPolicies, F
this.pluginResolver = pluginResolver;
}
- public void checkFlagEntitlement(Class> callerClass, FlagEntitlementType type) {
+ private static List lookupEntitlementsForModule(Policy policy, String moduleName) {
+ for (int i = 0; i < policy.scopes.size(); ++i) {
+ var scope = policy.scopes.get(i);
+ if (scope.name.equals(moduleName)) {
+ return scope.entitlements;
+ }
+ }
+ return null;
+ }
+
+ public void checkExitVM(Class> callerClass) {
+ checkEntitlementPresent(callerClass, ExitVMEntitlement.class);
+ }
+
+ public void checkCreateClassLoader(Class> callerClass) {
+ checkEntitlementPresent(callerClass, CreateClassLoaderEntitlement.class);
+ }
+
+ private void checkEntitlementPresent(Class> callerClass, Class extends Entitlement> entitlementClass) {
var requestingModule = requestingModule(callerClass);
if (isTriviallyAllowed(requestingModule)) {
return;
}
- // TODO: real policy check. For now, we only allow our hardcoded System.exit policy for server.
- // TODO: this will be checked using policies
- if (requestingModule.isNamed()
- && requestingModule.getName().equals("org.elasticsearch.server")
- && (type == FlagEntitlementType.SYSTEM_EXIT || type == FlagEntitlementType.CREATE_CLASSLOADER)) {
- logger.debug("Allowed: caller [{}] in module [{}] has entitlement [{}]", callerClass, requestingModule.getName(), type);
+ ModuleEntitlements entitlements = getEntitlementsOrThrow(callerClass, requestingModule);
+ if (entitlements.hasEntitlement(entitlementClass)) {
+ logger.debug(
+ () -> Strings.format(
+ "Entitled: caller [%s], module [%s], type [%s]",
+ callerClass,
+ requestingModule.getName(),
+ entitlementClass.getSimpleName()
+ )
+ );
return;
}
-
- // TODO: plugins policy check using pluginResolver and pluginPolicies
throw new NotEntitledException(
- Strings.format("Missing entitlement [%s] for caller [%s] in module [%s]", type, callerClass, requestingModule.getName())
+ Strings.format(
+ "Missing entitlement: caller [%s], module [%s], type [%s]",
+ callerClass,
+ requestingModule.getName(),
+ entitlementClass.getSimpleName()
+ )
);
}
+ ModuleEntitlements getEntitlementsOrThrow(Class> callerClass, Module requestingModule) {
+ ModuleEntitlements cachedEntitlement = moduleEntitlementsMap.get(requestingModule);
+ if (cachedEntitlement != null) {
+ if (cachedEntitlement == ModuleEntitlements.NONE) {
+ throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule) + "[CACHED]");
+ }
+ return cachedEntitlement;
+ }
+
+ if (isServerModule(requestingModule)) {
+ var scopeName = requestingModule.getName();
+ return getModuleEntitlementsOrThrow(callerClass, requestingModule, serverPolicy, scopeName);
+ }
+
+ // plugins
+ var pluginName = pluginResolver.apply(callerClass);
+ if (pluginName != null) {
+ var pluginPolicy = pluginPolicies.get(pluginName);
+ if (pluginPolicy != null) {
+ final String scopeName;
+ if (requestingModule.isNamed() == false) {
+ scopeName = ALL_UNNAMED;
+ } else {
+ scopeName = requestingModule.getName();
+ }
+ return getModuleEntitlementsOrThrow(callerClass, requestingModule, pluginPolicy, scopeName);
+ }
+ }
+
+ moduleEntitlementsMap.put(requestingModule, ModuleEntitlements.NONE);
+ throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule));
+ }
+
+ private static String buildModuleNoPolicyMessage(Class> callerClass, Module requestingModule) {
+ return Strings.format("Missing entitlement policy: caller [%s], module [%s]", callerClass, requestingModule.getName());
+ }
+
+ private ModuleEntitlements getModuleEntitlementsOrThrow(Class> callerClass, Module module, Policy policy, String moduleName) {
+ var entitlements = lookupEntitlementsForModule(policy, moduleName);
+ if (entitlements == null) {
+ // Module without entitlements - remember we don't have any
+ moduleEntitlementsMap.put(module, ModuleEntitlements.NONE);
+ throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, module));
+ }
+ // We have a policy for this module
+ var classEntitlements = createClassEntitlements(entitlements);
+ moduleEntitlementsMap.put(module, classEntitlements);
+ return classEntitlements;
+ }
+
+ private static boolean isServerModule(Module requestingModule) {
+ return requestingModule.isNamed() && requestingModule.getLayer() == ModuleLayer.boot();
+ }
+
+ private ModuleEntitlements createClassEntitlements(List entitlements) {
+ return new ModuleEntitlements(entitlements);
+ }
+
private static Module requestingModule(Class> callerClass) {
if (callerClass != null) {
Module callerModule = callerClass.getModule();
@@ -102,10 +213,10 @@ private static Module requestingModule(Class> callerClass) {
private static boolean isTriviallyAllowed(Module requestingModule) {
if (requestingModule == null) {
- logger.debug("Trivially allowed: entire call stack is in composed of classes in system modules");
+ logger.debug("Entitlement trivially allowed: entire call stack is in composed of classes in system modules");
return true;
}
- logger.trace("Not trivially allowed");
+ logger.trace("Entitlement not trivially allowed");
return false;
}
diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java
new file mode 100644
index 000000000000..45bdf2e45782
--- /dev/null
+++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java
@@ -0,0 +1,247 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+import org.elasticsearch.entitlement.runtime.api.NotEntitledException;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.compiler.InMemoryJavaCompiler;
+import org.elasticsearch.test.jar.JarUtils;
+
+import java.io.IOException;
+import java.lang.module.Configuration;
+import java.lang.module.ModuleFinder;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static java.util.Map.entry;
+import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED;
+import static org.elasticsearch.test.LambdaMatchers.transformedMatch;
+import static org.hamcrest.Matchers.aMapWithSize;
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.endsWith;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.sameInstance;
+
+@ESTestCase.WithoutSecurityManager
+public class PolicyManagerTests extends ESTestCase {
+
+ public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() {
+ var policyManager = new PolicyManager(
+ createEmptyTestServerPolicy(),
+ Map.of("plugin1", createPluginPolicy("plugin.module")),
+ c -> "plugin1"
+ );
+
+ // Any class from the current module (unnamed) will do
+ var callerClass = this.getClass();
+ var requestingModule = callerClass.getModule();
+
+ var ex = assertThrows(
+ "No policy for the unnamed module",
+ NotEntitledException.class,
+ () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)
+ );
+
+ assertEquals(
+ "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]",
+ ex.getMessage()
+ );
+ assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE));
+ }
+
+ public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() {
+ var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1");
+
+ // Any class from the current module (unnamed) will do
+ var callerClass = this.getClass();
+ var requestingModule = callerClass.getModule();
+
+ var ex = assertThrows(
+ "No policy for this plugin",
+ NotEntitledException.class,
+ () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)
+ );
+
+ assertEquals(
+ "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]",
+ ex.getMessage()
+ );
+ assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE));
+ }
+
+ public void testGetEntitlementsFailureIsCached() {
+ var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1");
+
+ // Any class from the current module (unnamed) will do
+ var callerClass = this.getClass();
+ var requestingModule = callerClass.getModule();
+
+ assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule));
+ assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE));
+
+ // A second time
+ var ex = assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule));
+
+ assertThat(ex.getMessage(), endsWith("[CACHED]"));
+ // Nothing new in the map
+ assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1));
+ }
+
+ public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() {
+ var policyManager = new PolicyManager(
+ createEmptyTestServerPolicy(),
+ Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))),
+ c -> "plugin2"
+ );
+
+ // Any class from the current module (unnamed) will do
+ var callerClass = this.getClass();
+ var requestingModule = callerClass.getModule();
+
+ var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule);
+ assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
+ }
+
+ public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotFoundException {
+ var policyManager = new PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null);
+
+ // Tests do not run modular, so we cannot use a server class.
+ // But we know that in production code the server module and its classes are in the boot layer.
+ // So we use a random module in the boot layer, and a random class from that module (not java.base -- it is
+ // loaded too early) to mimic a class that would be in the server module.
+ var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer");
+ var requestingModule = mockServerClass.getModule();
+
+ var ex = assertThrows(
+ "No policy for this module in server",
+ NotEntitledException.class,
+ () -> policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule)
+ );
+
+ assertEquals(
+ "Missing entitlement policy: caller [class com.sun.net.httpserver.HttpServer], module [jdk.httpserver]",
+ ex.getMessage()
+ );
+ assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE));
+ }
+
+ public void testGetEntitlementsReturnsEntitlementsForServerModule() throws ClassNotFoundException {
+ var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null);
+
+ // Tests do not run modular, so we cannot use a server class.
+ // But we know that in production code the server module and its classes are in the boot layer.
+ // So we use a random module in the boot layer, and a random class from that module (not java.base -- it is
+ // loaded too early) to mimic a class that would be in the server module.
+ var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer");
+ var requestingModule = mockServerClass.getModule();
+
+ var entitlements = policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule);
+ assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
+ assertThat(entitlements.hasEntitlement(ExitVMEntitlement.class), is(true));
+ }
+
+ public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOException, ClassNotFoundException {
+ final Path home = createTempDir();
+
+ Path jar = creteMockPluginJar(home);
+
+ var policyManager = new PolicyManager(
+ createEmptyTestServerPolicy(),
+ Map.of("mock-plugin", createPluginPolicy("org.example.plugin")),
+ c -> "mock-plugin"
+ );
+
+ var layer = createLayerForJar(jar, "org.example.plugin");
+ var mockPluginClass = layer.findLoader("org.example.plugin").loadClass("q.B");
+ var requestingModule = mockPluginClass.getModule();
+
+ var entitlements = policyManager.getEntitlementsOrThrow(mockPluginClass, requestingModule);
+ assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
+ assertThat(
+ entitlements.getEntitlements(FileEntitlement.class).toList(),
+ contains(transformedMatch(FileEntitlement::toString, containsString("/test/path")))
+ );
+ }
+
+ public void testGetEntitlementsResultIsCached() {
+ var policyManager = new PolicyManager(
+ createEmptyTestServerPolicy(),
+ Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))),
+ c -> "plugin2"
+ );
+
+ // Any class from the current module (unnamed) will do
+ var callerClass = this.getClass();
+ var requestingModule = callerClass.getModule();
+
+ var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule);
+ assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
+ assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1));
+ var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().get();
+ var entitlementsAgain = policyManager.getEntitlementsOrThrow(callerClass, requestingModule);
+
+ // Nothing new in the map
+ assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1));
+ assertThat(entitlementsAgain, sameInstance(cachedResult));
+ }
+
+ private static Policy createEmptyTestServerPolicy() {
+ return new Policy("server", List.of());
+ }
+
+ private static Policy createTestServerPolicy(String scopeName) {
+ return new Policy("server", List.of(new Scope(scopeName, List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))));
+ }
+
+ private static Policy createPluginPolicy(String... pluginModules) {
+ return new Policy(
+ "plugin",
+ Arrays.stream(pluginModules)
+ .map(
+ name -> new Scope(
+ name,
+ List.of(new FileEntitlement("/test/path", List.of(FileEntitlement.READ)), new CreateClassLoaderEntitlement())
+ )
+ )
+ .toList()
+ );
+ }
+
+ private static Path creteMockPluginJar(Path home) throws IOException {
+ Path jar = home.resolve("mock-plugin.jar");
+
+ Map<String, CharSequence> sources = Map.ofEntries(
+ entry("module-info", "module org.example.plugin { exports q; }"),
+ entry("q.B", "package q; public class B { }")
+ );
+
+ var classToBytes = InMemoryJavaCompiler.compile(sources);
+ JarUtils.createJarWithEntries(
+ jar,
+ Map.ofEntries(entry("module-info.class", classToBytes.get("module-info")), entry("q/B.class", classToBytes.get("q.B")))
+ );
+ return jar;
+ }
+
+ private static ModuleLayer createLayerForJar(Path jar, String moduleName) {
+ Configuration cf = ModuleLayer.boot().configuration().resolve(ModuleFinder.of(jar), ModuleFinder.of(), Set.of(moduleName));
+ var moduleController = ModuleLayer.defineModulesWithOneLoader(
+ cf,
+ List.of(ModuleLayer.boot()),
+ ClassLoader.getPlatformClassLoader()
+ );
+ return moduleController.layer();
+ }
+}
diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
index d42c56845d03..38ef8bc2e4ef 100644
--- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
+++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
@@ -108,7 +108,11 @@ public String text() throws IOException {
if (currentToken().isValue() == false) {
throwOnNoText();
}
- return parser.getText();
+ try {
+ return parser.getText();
+ } catch (JsonParseException e) {
+ throw newXContentParseException(e);
+ }
}
private void throwOnNoText() {
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java
index c4d0aef0183e..c128af69009b 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java
@@ -123,7 +123,7 @@ public String getWriteableName() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+ return TransportVersions.V_8_16_0;
}
@Override
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java
index 47ca79e3cb3b..96525d427d3e 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java
@@ -44,7 +44,7 @@
public class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
private static boolean includeSha256(TransportVersion version) {
- return version.isPatchFrom(TransportVersions.V_8_15_0) || version.onOrAfter(TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER);
+ return version.onOrAfter(TransportVersions.V_8_15_0);
}
private static final ParseField DATABASES = new ParseField("databases");
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java
index b6e73f3f33f7..a50fe7dee900 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java
@@ -69,7 +69,7 @@ public String getWriteableName() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+ return TransportVersions.V_8_16_0;
}
public Map getDatabases() {
@@ -138,7 +138,7 @@ public String getWriteableName() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+ return TransportVersions.V_8_16_0;
}
}
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
index a26364f9305e..aa48c73cf1d7 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
@@ -138,7 +138,7 @@ public DatabaseConfiguration(StreamInput in) throws IOException {
}
private static Provider readProvider(StreamInput in) throws IOException {
- if (in.getTransportVersion().onOrAfter(TransportVersions.INGEST_GEO_DATABASE_PROVIDERS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
return in.readNamedWriteable(Provider.class);
} else {
// prior to the above version, everything was always a maxmind, so this half of the if is logical
@@ -154,7 +154,7 @@ public static DatabaseConfiguration parse(XContentParser parser, String id) {
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
out.writeString(name);
- if (out.getTransportVersion().onOrAfter(TransportVersions.INGEST_GEO_DATABASE_PROVIDERS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeNamedWriteable(provider);
} else {
if (provider instanceof Maxmind maxmind) {
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java
index 3fd5cc44a340..1d39b993cef9 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java
@@ -40,6 +40,7 @@
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.codec.http.HttpVersion;
+import org.apache.http.ConnectionClosedException;
import org.apache.http.HttpHost;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchException;
@@ -48,6 +49,7 @@
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.IncrementalBulkService;
import org.elasticsearch.action.support.ActionTestUtils;
+import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.SubscribableListener;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;
@@ -100,6 +102,7 @@
import java.util.Collections;
import java.util.List;
import java.util.Set;
+import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
@@ -110,6 +113,7 @@
import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD;
import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
import static org.elasticsearch.rest.RestStatus.OK;
import static org.elasticsearch.rest.RestStatus.UNAUTHORIZED;
@@ -1039,8 +1043,16 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
}
}
- public void testRespondAfterClose() throws Exception {
- final String url = "/thing";
+ public void testRespondAfterServiceCloseWithClientCancel() throws Exception {
+ runRespondAfterServiceCloseTest(true);
+ }
+
+ public void testRespondAfterServiceCloseWithServerCancel() throws Exception {
+ runRespondAfterServiceCloseTest(false);
+ }
+
+ private void runRespondAfterServiceCloseTest(boolean clientCancel) throws Exception {
+ final String url = "/" + randomIdentifier();
final CountDownLatch responseReleasedLatch = new CountDownLatch(1);
+ final SubscribableListener<Void> transportClosedFuture = new SubscribableListener<>();
final CountDownLatch handlingRequestLatch = new CountDownLatch(1);
@@ -1066,7 +1078,9 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
try (
Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
- Settings.EMPTY,
+ clientCancel
+ ? Settings.EMPTY
+ : Settings.builder().put(SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.getKey(), TimeValue.timeValueMillis(1)).build(),
networkService,
threadPool,
xContentRegistry(),
@@ -1082,11 +1096,24 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
transport.start();
final var address = randomFrom(transport.boundAddress().boundAddresses()).address();
try (var client = RestClient.builder(new HttpHost(address.getAddress(), address.getPort())).build()) {
- client.performRequestAsync(new Request("GET", url), ActionTestUtils.wrapAsRestResponseListener(ActionListener.noop()));
+ final var responseExceptionFuture = new PlainActionFuture<Exception>();
+ final var cancellable = client.performRequestAsync(
+ new Request("GET", url),
+ ActionTestUtils.wrapAsRestResponseListener(ActionTestUtils.assertNoSuccessListener(responseExceptionFuture::onResponse))
+ );
safeAwait(handlingRequestLatch);
+ if (clientCancel) {
+ threadPool.generic().execute(cancellable::cancel);
+ }
transport.close();
transportClosedFuture.onResponse(null);
safeAwait(responseReleasedLatch);
+ final var responseException = safeGet(responseExceptionFuture);
+ if (clientCancel) {
+ assertThat(responseException, instanceOf(CancellationException.class));
+ } else {
+ assertThat(responseException, instanceOf(ConnectionClosedException.class));
+ }
}
}
}
diff --git a/muted-tests.yml b/muted-tests.yml
index 0b400f420e86..ee5e3dd42236 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -2,12 +2,6 @@ tests:
- class: "org.elasticsearch.client.RestClientSingleHostIntegTests"
issue: "https://github.com/elastic/elasticsearch/issues/102717"
method: "testRequestResetAndAbort"
-- class: org.elasticsearch.xpack.restart.FullClusterRestartIT
- method: testSingleDoc {cluster=UPGRADED}
- issue: https://github.com/elastic/elasticsearch/issues/111434
-- class: org.elasticsearch.xpack.restart.FullClusterRestartIT
- method: testDataStreams {cluster=UPGRADED}
- issue: https://github.com/elastic/elasticsearch/issues/111448
- class: org.elasticsearch.smoketest.WatcherYamlRestIT
method: test {p0=watcher/usage/10_basic/Test watcher usage stats output}
issue: https://github.com/elastic/elasticsearch/issues/112189
@@ -103,9 +97,6 @@ tests:
- class: org.elasticsearch.search.StressSearchServiceReaperIT
method: testStressReaper
issue: https://github.com/elastic/elasticsearch/issues/115816
-- class: org.elasticsearch.search.SearchServiceTests
- method: testParseSourceValidation
- issue: https://github.com/elastic/elasticsearch/issues/115936
- class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests
issue: https://github.com/elastic/elasticsearch/issues/116087
- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
@@ -245,6 +236,17 @@ tests:
- class: org.elasticsearch.xpack.restart.QueryBuilderBWCIT
method: testQueryBuilderBWC {p0=UPGRADED}
issue: https://github.com/elastic/elasticsearch/issues/116989
+- class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT
+ method: testReindexWithShutdown
+ issue: https://github.com/elastic/elasticsearch/issues/118040
+- class: org.elasticsearch.packaging.test.ConfigurationTests
+ method: test20HostnameSubstitution
+ issue: https://github.com/elastic/elasticsearch/issues/118028
+- class: org.elasticsearch.packaging.test.ArchiveTests
+ method: test40AutoconfigurationNotTriggeredWhenNodeIsMeantToJoinExistingCluster
+ issue: https://github.com/elastic/elasticsearch/issues/118029
+- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
+ issue: https://github.com/elastic/elasticsearch/issues/117981
# Examples:
#
diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java
index 369d0824bdb2..3faa88339f0a 100644
--- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java
+++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java
@@ -74,8 +74,7 @@ public void testBulkInvalidIndexNameString() throws IOException {
ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request));
assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus()));
- assertThat(responseException.getMessage(), containsString("could not parse bulk request body"));
- assertThat(responseException.getMessage(), containsString("json_parse_exception"));
+ assertThat(responseException.getMessage(), containsString("x_content_parse_exception"));
assertThat(responseException.getMessage(), containsString("Invalid UTF-8"));
}
diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
index 3c5c36565420..fcb5c20c2816 100644
--- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -1947,13 +1947,13 @@ private enum ElasticsearchExceptionHandle {
org.elasticsearch.ingest.IngestPipelineException.class,
org.elasticsearch.ingest.IngestPipelineException::new,
182,
- TransportVersions.INGEST_PIPELINE_EXCEPTION_ADDED
+ TransportVersions.V_8_16_0
),
INDEX_RESPONSE_WRAPPER_EXCEPTION(
IndexDocFailureStoreStatus.ExceptionWithFailureStoreStatus.class,
IndexDocFailureStoreStatus.ExceptionWithFailureStoreStatus::new,
183,
- TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE
+ TransportVersions.V_8_16_0
);
final Class<? extends ElasticsearchException> exceptionClass;
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 2e4842912dfa..1a1219825bbb 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -104,78 +104,7 @@ static TransportVersion def(int id) {
public static final TransportVersion V_8_14_0 = def(8_636_00_1);
public static final TransportVersion V_8_15_0 = def(8_702_00_2);
public static final TransportVersion V_8_15_2 = def(8_702_00_3);
- public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15 = def(8_702_00_4);
- public static final TransportVersion ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS = def(8_703_00_0);
- public static final TransportVersion INFERENCE_ADAPTIVE_ALLOCATIONS = def(8_704_00_0);
- public static final TransportVersion INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN = def(8_705_00_0);
- public static final TransportVersion ML_INFERENCE_COHERE_UNUSED_RERANK_SETTINGS_REMOVED = def(8_706_00_0);
- public static final TransportVersion ENRICH_CACHE_STATS_SIZE_ADDED = def(8_707_00_0);
- public static final TransportVersion ENTERPRISE_GEOIP_DOWNLOADER = def(8_708_00_0);
- public static final TransportVersion NODES_STATS_ENUM_SET = def(8_709_00_0);
- public static final TransportVersion MASTER_NODE_METRICS = def(8_710_00_0);
- public static final TransportVersion SEGMENT_LEVEL_FIELDS_STATS = def(8_711_00_0);
- public static final TransportVersion ML_ADD_DETECTION_RULE_PARAMS = def(8_712_00_0);
- public static final TransportVersion FIX_VECTOR_SIMILARITY_INNER_HITS = def(8_713_00_0);
- public static final TransportVersion INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN = def(8_714_00_0);
- public static final TransportVersion ESQL_ATTRIBUTE_CACHED_SERIALIZATION = def(8_715_00_0);
- public static final TransportVersion REGISTER_SLM_STATS = def(8_716_00_0);
- public static final TransportVersion ESQL_NESTED_UNSUPPORTED = def(8_717_00_0);
- public static final TransportVersion ESQL_SINGLE_VALUE_QUERY_SOURCE = def(8_718_00_0);
- public static final TransportVersion ESQL_ORIGINAL_INDICES = def(8_719_00_0);
- public static final TransportVersion ML_INFERENCE_EIS_INTEGRATION_ADDED = def(8_720_00_0);
- public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0);
- public static final TransportVersion ZDT_NANOS_SUPPORT_BROKEN = def(8_722_00_0);
- public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0);
- public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0);
- public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0);
- public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0);
- public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0);
- public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0);
- public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0);
- public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0);
- public static final TransportVersion ADD_MANAGE_ROLES_PRIVILEGE = def(8_731_00_0);
- public static final TransportVersion REPOSITORIES_TELEMETRY = def(8_732_00_0);
- public static final TransportVersion ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED = def(8_733_00_0);
- public static final TransportVersion FIELD_CAPS_RESPONSE_INDEX_MODE = def(8_734_00_0);
- public static final TransportVersion GET_DATA_STREAMS_VERBOSE = def(8_735_00_0);
- public static final TransportVersion ESQL_ADD_INDEX_MODE_CONCRETE_INDICES = def(8_736_00_0);
- public static final TransportVersion UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH = def(8_737_00_0);
- public static final TransportVersion ESQL_AGGREGATE_EXEC_TRACKS_INTERMEDIATE_ATTRS = def(8_738_00_0);
- public static final TransportVersion CCS_TELEMETRY_STATS = def(8_739_00_0);
- public static final TransportVersion GLOBAL_RETENTION_TELEMETRY = def(8_740_00_0);
- public static final TransportVersion ROUTING_TABLE_VERSION_REMOVED = def(8_741_00_0);
- public static final TransportVersion ML_SCHEDULED_EVENT_TIME_SHIFT_CONFIGURATION = def(8_742_00_0);
- public static final TransportVersion SIMULATE_COMPONENT_TEMPLATES_SUBSTITUTIONS = def(8_743_00_0);
- public static final TransportVersion ML_INFERENCE_IBM_WATSONX_EMBEDDINGS_ADDED = def(8_744_00_0);
- public static final TransportVersion BULK_INCREMENTAL_STATE = def(8_745_00_0);
- public static final TransportVersion FAILURE_STORE_STATUS_IN_INDEX_RESPONSE = def(8_746_00_0);
- public static final TransportVersion ESQL_AGGREGATION_OPERATOR_STATUS_FINISH_NANOS = def(8_747_00_0);
- public static final TransportVersion ML_TELEMETRY_MEMORY_ADDED = def(8_748_00_0);
- public static final TransportVersion ILM_ADD_SEARCHABLE_SNAPSHOT_TOTAL_SHARDS_PER_NODE = def(8_749_00_0);
- public static final TransportVersion SEMANTIC_TEXT_SEARCH_INFERENCE_ID = def(8_750_00_0);
- public static final TransportVersion ML_INFERENCE_CHUNKING_SETTINGS = def(8_751_00_0);
- public static final TransportVersion SEMANTIC_QUERY_INNER_HITS = def(8_752_00_0);
- public static final TransportVersion RETAIN_ILM_STEP_INFO = def(8_753_00_0);
- public static final TransportVersion ADD_DATA_STREAM_OPTIONS = def(8_754_00_0);
- public static final TransportVersion CCS_REMOTE_TELEMETRY_STATS = def(8_755_00_0);
- public static final TransportVersion ESQL_CCS_EXECUTION_INFO = def(8_756_00_0);
- public static final TransportVersion REGEX_AND_RANGE_INTERVAL_QUERIES = def(8_757_00_0);
- public static final TransportVersion RRF_QUERY_REWRITE = def(8_758_00_0);
- public static final TransportVersion SEARCH_FAILURE_STATS = def(8_759_00_0);
- public static final TransportVersion INGEST_GEO_DATABASE_PROVIDERS = def(8_760_00_0);
- public static final TransportVersion DATE_TIME_DOC_VALUES_LOCALES = def(8_761_00_0);
- public static final TransportVersion FAST_REFRESH_RCO = def(8_762_00_0);
- public static final TransportVersion TEXT_SIMILARITY_RERANKER_QUERY_REWRITE = def(8_763_00_0);
- public static final TransportVersion SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS = def(8_764_00_0);
- public static final TransportVersion RETRIEVERS_TELEMETRY_ADDED = def(8_765_00_0);
- public static final TransportVersion ESQL_CACHED_STRING_SERIALIZATION = def(8_766_00_0);
- public static final TransportVersion CHUNK_SENTENCE_OVERLAP_SETTING_ADDED = def(8_767_00_0);
- public static final TransportVersion OPT_IN_ESQL_CCS_EXECUTION_INFO = def(8_768_00_0);
- public static final TransportVersion QUERY_RULE_TEST_API = def(8_769_00_0);
- public static final TransportVersion ESQL_PER_AGGREGATE_FILTER = def(8_770_00_0);
- public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0);
- public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0);
- public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1);
+ public static final TransportVersion V_8_16_0 = def(8_772_00_1);
public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16 = def(8_772_00_2);
public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16 = def(8_772_00_3);
public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16 = def(8_772_00_4);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java
index e14f229f17ac..d929fb457d5d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java
@@ -118,7 +118,7 @@ public Request(TimeValue masterNodeTimeout, TaskId parentTaskId, EnumSet
public Request(StreamInput in) throws IOException {
super(in);
- this.metrics = in.getTransportVersion().onOrAfter(TransportVersions.MASTER_NODE_METRICS)
+ this.metrics = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)
? in.readEnumSet(Metric.class)
: EnumSet.of(Metric.ALLOCATIONS, Metric.FS);
}
@@ -127,7 +127,7 @@ public Request(StreamInput in) throws IOException {
public void writeTo(StreamOutput out) throws IOException {
assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0);
super.writeTo(out);
- if (out.getTransportVersion().onOrAfter(TransportVersions.MASTER_NODE_METRICS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeEnumSet(metrics);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java
index d34bc3ec0dc2..c5e8f37ed3a9 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java
@@ -117,7 +117,7 @@ public static Metric get(String name) {
}
public static void writeSetTo(StreamOutput out, EnumSet metrics) throws IOException {
- if (out.getTransportVersion().onOrAfter(TransportVersions.NODES_STATS_ENUM_SET)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeEnumSet(metrics);
} else {
out.writeCollection(metrics, (output, metric) -> output.writeString(metric.metricName));
@@ -125,7 +125,7 @@ public static void writeSetTo(StreamOutput out, EnumSet metrics) throws
}
public static EnumSet readSetFrom(StreamInput in) throws IOException {
- if (in.getTransportVersion().onOrAfter(TransportVersions.NODES_STATS_ENUM_SET)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
return in.readEnumSet(Metric.class);
} else {
return in.readCollection((i) -> EnumSet.noneOf(Metric.class), (is, out) -> {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
index 9c9467db40de..b6ced0662330 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
@@ -118,7 +118,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException {
waitForCompletion = in.readBoolean();
partial = in.readBoolean();
userMetadata = in.readGenericMap();
- uuid = in.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS) ? in.readOptionalString() : null;
+ uuid = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalString() : null;
}
@Override
@@ -136,7 +136,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(waitForCompletion);
out.writeBoolean(partial);
out.writeGenericMap(userMetadata);
- if (out.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalString(uuid);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
index f99baa855404..abeb73e5d8c3 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
@@ -44,14 +44,11 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException {
} else {
searchUsageStats = new SearchUsageStats();
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
repositoryUsageStats = RepositoryUsageStats.readFrom(in);
- } else {
- repositoryUsageStats = RepositoryUsageStats.EMPTY;
- }
- if (in.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) {
ccsMetrics = new CCSTelemetrySnapshot(in);
} else {
+ repositoryUsageStats = RepositoryUsageStats.EMPTY;
ccsMetrics = new CCSTelemetrySnapshot();
}
}
@@ -118,12 +115,10 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) {
searchUsageStats.writeTo(out);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
repositoryUsageStats.writeTo(out);
- } // else just drop these stats, ok for bwc
- if (out.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) {
ccsMetrics.writeTo(out);
- }
+ } // else just drop these stats, ok for bwc
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java
index 47843a91351e..6c3c5cbb50ec 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java
@@ -36,9 +36,9 @@ public ActionRequestValidationException validate() {
@Override
public void writeTo(StreamOutput out) throws IOException {
- assert out.getTransportVersion().onOrAfter(TransportVersions.CCS_REMOTE_TELEMETRY_STATS)
+ assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)
: "RemoteClusterStatsRequest is not supported by the remote cluster";
- if (out.getTransportVersion().before(TransportVersions.CCS_REMOTE_TELEMETRY_STATS)) {
+ if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) {
throw new UnsupportedOperationException("RemoteClusterStatsRequest is not supported by the remote cluster");
}
super.writeTo(out);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java
index 0f6c56fd21bd..a6e80b5efd08 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java
@@ -22,8 +22,8 @@
import java.util.Map;
import java.util.Objects;
-import static org.elasticsearch.TransportVersions.RETRIEVERS_TELEMETRY_ADDED;
import static org.elasticsearch.TransportVersions.V_8_12_0;
+import static org.elasticsearch.TransportVersions.V_8_16_0;
/**
* Holds a snapshot of the search usage statistics.
@@ -71,7 +71,7 @@ public SearchUsageStats(StreamInput in) throws IOException {
this.sections = in.readMap(StreamInput::readLong);
this.totalSearchCount = in.readVLong();
this.rescorers = in.getTransportVersion().onOrAfter(V_8_12_0) ? in.readMap(StreamInput::readLong) : Map.of();
- this.retrievers = in.getTransportVersion().onOrAfter(RETRIEVERS_TELEMETRY_ADDED) ? in.readMap(StreamInput::readLong) : Map.of();
+ this.retrievers = in.getTransportVersion().onOrAfter(V_8_16_0) ? in.readMap(StreamInput::readLong) : Map.of();
}
@Override
@@ -83,7 +83,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(V_8_12_0)) {
out.writeMap(rescorers, StreamOutput::writeLong);
}
- if (out.getTransportVersion().onOrAfter(RETRIEVERS_TELEMETRY_ADDED)) {
+ if (out.getTransportVersion().onOrAfter(V_8_16_0)) {
out.writeMap(retrievers, StreamOutput::writeLong);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index 97585ea9a102..2c20daa5d7af 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -12,6 +12,7 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.store.AlreadyClosedException;
+import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.ActionType;
@@ -72,8 +73,6 @@
import java.util.function.BooleanSupplier;
import java.util.stream.Collectors;
-import static org.elasticsearch.TransportVersions.CCS_REMOTE_TELEMETRY_STATS;
-
/**
* Transport action implementing _cluster/stats API.
*/
@@ -450,7 +449,7 @@ protected void sendItemRequest(String clusterAlias, ActionListener {
- if (connection.getTransportVersion().before(CCS_REMOTE_TELEMETRY_STATS)) {
+ if (connection.getTransportVersion().before(TransportVersions.V_8_16_0)) {
responseListener.onResponse(null);
} else {
remoteClusterClient.execute(connection, TransportRemoteClusterStatsAction.REMOTE_TYPE, remoteRequest, responseListener);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java
index c6d990e5a1d6..f729455edcc2 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java
@@ -131,8 +131,7 @@ public Response(StreamInput in) throws IOException {
} else {
rolloverConfiguration = null;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)
- && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) {
+ if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
in.readOptionalWriteable(DataStreamGlobalRetention::read);
}
}
@@ -190,8 +189,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
out.writeOptionalWriteable(rolloverConfiguration);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)
- && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) {
+ if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
out.writeOptionalWriteable(null);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java
index a47f89030cc6..67f87476ea6a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java
@@ -132,8 +132,7 @@ public Response(StreamInput in) throws IOException {
} else {
rolloverConfiguration = null;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)
- && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) {
+ if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
in.readOptionalWriteable(DataStreamGlobalRetention::read);
}
}
@@ -191,8 +190,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
out.writeOptionalWriteable(rolloverConfiguration);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)
- && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) {
+ if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
out.writeOptionalWriteable(null);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
index 064c24cf4afa..80e6fbfe051a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
@@ -82,8 +82,7 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException {
rolloverConfiguration = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)
? in.readOptionalWriteable(RolloverConfiguration::new)
: null;
- if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)
- && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) {
+ if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
in.readOptionalWriteable(DataStreamGlobalRetention::read);
}
}
@@ -104,8 +103,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
out.writeOptionalWriteable(rolloverConfiguration);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)
- && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) {
+ if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
out.writeOptionalWriteable(null);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
index d5931c85bb2e..1ff970de7525 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
@@ -200,7 +200,7 @@ public Failure(StreamInput in) throws IOException {
seqNo = in.readZLong();
term = in.readVLong();
aborted = in.readBoolean();
- if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
failureStoreStatus = IndexDocFailureStoreStatus.read(in);
} else {
failureStoreStatus = IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN;
@@ -218,7 +218,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeZLong(seqNo);
out.writeVLong(term);
out.writeBoolean(aborted);
- if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
failureStoreStatus.writeTo(out);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
index f62b2f48fa2f..91caebc420ff 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
@@ -98,7 +98,7 @@ public BulkRequest(StreamInput in) throws IOException {
for (DocWriteRequest> request : requests) {
indices.add(Objects.requireNonNull(request.index(), "request index must not be null"));
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
incrementalState = new BulkRequest.IncrementalState(in);
} else {
incrementalState = BulkRequest.IncrementalState.EMPTY;
@@ -454,7 +454,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeCollection(requests, DocWriteRequest::writeDocumentRequest);
refreshPolicy.writeTo(out);
out.writeTimeValue(timeout);
- if (out.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
incrementalState.writeTo(out);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java
index ec7a08007de9..12d3aa67ca9b 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java
@@ -46,7 +46,7 @@ public BulkResponse(StreamInput in) throws IOException {
responses = in.readArray(BulkItemResponse::new, BulkItemResponse[]::new);
tookInMillis = in.readVLong();
ingestTookInMillis = in.readZLong();
- if (in.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
incrementalState = new BulkRequest.IncrementalState(in);
} else {
incrementalState = BulkRequest.IncrementalState.EMPTY;
@@ -151,7 +151,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeArray(responses);
out.writeVLong(tookInMillis);
out.writeZLong(ingestTookInMillis);
- if (out.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
incrementalState.writeTo(out);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java b/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java
index cb83d693a415..7367dfa1d53f 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java
@@ -124,7 +124,7 @@ public ExceptionWithFailureStoreStatus(BulkItemResponse.Failure failure) {
public ExceptionWithFailureStoreStatus(StreamInput in) throws IOException {
super(in);
- if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
failureStoreStatus = IndexDocFailureStoreStatus.fromId(in.readByte());
} else {
failureStoreStatus = NOT_APPLICABLE_OR_UNKNOWN;
@@ -134,7 +134,7 @@ public ExceptionWithFailureStoreStatus(StreamInput in) throws IOException {
@Override
protected void writeTo(StreamOutput out, Writer nestedExceptionsWriter) throws IOException {
super.writeTo(out, nestedExceptionsWriter);
- if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeByte(failureStoreStatus.getId());
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java
index cc7fd431d809..290d342e9dc1 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java
@@ -135,14 +135,11 @@ public SimulateBulkRequest(
public SimulateBulkRequest(StreamInput in) throws IOException {
super(in);
this.pipelineSubstitutions = (Map>) in.readGenericValue();
- if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_COMPONENT_TEMPLATES_SUBSTITUTIONS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.componentTemplateSubstitutions = (Map>) in.readGenericValue();
- } else {
- componentTemplateSubstitutions = Map.of();
- }
- if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS)) {
this.indexTemplateSubstitutions = (Map>) in.readGenericValue();
} else {
+ componentTemplateSubstitutions = Map.of();
indexTemplateSubstitutions = Map.of();
}
if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_MAPPING_ADDITION)) {
@@ -156,10 +153,8 @@ public SimulateBulkRequest(StreamInput in) throws IOException {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeGenericValue(pipelineSubstitutions);
- if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_COMPONENT_TEMPLATES_SUBSTITUTIONS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeGenericValue(componentTemplateSubstitutions);
- }
- if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS)) {
out.writeGenericValue(indexTemplateSubstitutions);
}
if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_MAPPING_ADDITION)) {
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
index c1cf0fa7aab4..93c40ad18cc8 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
@@ -112,7 +112,7 @@ public Request(StreamInput in) throws IOException {
} else {
this.includeDefaults = false;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.verbose = in.readBoolean();
} else {
this.verbose = false;
@@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
out.writeBoolean(includeDefaults);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeBoolean(verbose);
}
}
@@ -275,7 +275,7 @@ public DataStreamInfo(
in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) ? in.readOptionalWriteable(TimeSeries::new) : null,
in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readMap(Index::new, IndexProperties::new) : Map.of(),
in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readBoolean() : true,
- in.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE) ? in.readOptionalVLong() : null
+ in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalVLong() : null
);
}
@@ -328,7 +328,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeMap(indexSettingsValues);
out.writeBoolean(templatePreferIlmValue);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalVLong(maximumTimestamp);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java
index d16100a64713..6f510ad26f5e 100644
--- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java
@@ -62,7 +62,7 @@ public FieldCapabilitiesIndexResponse(
} else {
this.indexMappingHash = null;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.indexMode = IndexMode.readFrom(in);
} else {
this.indexMode = IndexMode.STANDARD;
@@ -77,7 +77,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(MAPPING_HASH_VERSION)) {
out.writeOptionalString(indexMappingHash);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
IndexMode.writeTo(indexMode, out);
}
}
@@ -105,7 +105,7 @@ static List readList(StreamInput input) throws I
private static void collectCompressedResponses(StreamInput input, int groups, ArrayList responses)
throws IOException {
final CompressedGroup[] compressedGroups = new CompressedGroup[groups];
- final boolean readIndexMode = input.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE);
+ final boolean readIndexMode = input.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0);
for (int i = 0; i < groups; i++) {
final String[] indices = input.readStringArray();
final IndexMode indexMode = readIndexMode ? IndexMode.readFrom(input) : IndexMode.STANDARD;
@@ -179,7 +179,7 @@ private static void writeCompressedResponses(StreamOutput output, Map {
o.writeCollection(fieldCapabilitiesIndexResponses, (oo, r) -> oo.writeString(r.indexName));
var first = fieldCapabilitiesIndexResponses.get(0);
- if (output.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) {
+ if (output.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
IndexMode.writeTo(first.indexMode, o);
}
o.writeString(first.indexMappingHash);
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index c0811e7424b0..5254c6fd06db 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -205,10 +205,8 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) {
in.readZLong(); // obsolete normalisedBytesParsed
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
in.readBoolean(); // obsolete originatesFromUpdateByScript
- }
- if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) {
in.readBoolean(); // obsolete originatesFromUpdateByDoc
}
}
@@ -789,10 +787,8 @@ private void writeBody(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) {
out.writeZLong(-1); // obsolete normalisedBytesParsed
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeBoolean(false); // obsolete originatesFromUpdateByScript
- }
- if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) {
out.writeBoolean(false); // obsolete originatesFromUpdateByDoc
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java
index 8d1bdf227e24..7c45de890517 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java
@@ -46,7 +46,7 @@ public IndexResponse(ShardId shardId, StreamInput in) throws IOException {
} else {
executedPipelines = null;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
failureStoreStatus = IndexDocFailureStoreStatus.read(in);
} else {
failureStoreStatus = IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN;
@@ -60,7 +60,7 @@ public IndexResponse(StreamInput in) throws IOException {
} else {
executedPipelines = null;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
failureStoreStatus = IndexDocFailureStoreStatus.read(in);
} else {
failureStoreStatus = IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN;
@@ -126,7 +126,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) {
out.writeOptionalCollection(executedPipelines, StreamOutput::writeString);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
failureStoreStatus.writeTo(out);
}
}
@@ -137,7 +137,7 @@ public void writeThin(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) {
out.writeOptionalCollection(executedPipelines, StreamOutput::writeString);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
failureStoreStatus.writeTo(out);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java
index 969ba2ad983c..d68e2ce1b02b 100644
--- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java
@@ -63,7 +63,7 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException {
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) {
this.indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class);
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.allowPartialSearchResults = in.readBoolean();
}
}
@@ -82,7 +82,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) {
out.writeOptionalWriteable(indexFilter);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeBoolean(allowPartialSearchResults);
} else if (allowPartialSearchResults) {
throw new IOException("[allow_partial_search_results] is not supported on nodes with version " + out.getTransportVersion());
diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java
index 3c830c8ed9dc..b3ffc564d848 100644
--- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java
@@ -47,7 +47,7 @@ public OpenPointInTimeResponse(
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBytesReference(pointInTimeId);
- if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeVInt(totalShards);
out.writeVInt(successfulShards);
out.writeVInt(failedShards);
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java
index ca810bb88653..c2f1510341fb 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java
@@ -63,14 +63,14 @@ public static BytesReference encode(
TransportVersion version,
ShardSearchFailure[] shardFailures
) {
- assert shardFailures.length == 0 || version.onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)
+ assert shardFailures.length == 0 || version.onOrAfter(TransportVersions.V_8_16_0)
: "[allow_partial_search_results] cannot be enabled on a cluster that has not been fully upgraded to version ["
- + TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT
+ + TransportVersions.V_8_16_0.toReleaseVersion()
+ "] or higher.";
try (var out = new BytesStreamOutput()) {
out.setTransportVersion(version);
TransportVersion.writeVersion(version, out);
- boolean allowNullContextId = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT);
+ boolean allowNullContextId = out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0);
int shardSize = searchPhaseResults.size() + (allowNullContextId ? shardFailures.length : 0);
out.writeVInt(shardSize);
for (var searchResult : searchPhaseResults) {
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java
index 7509a7b0fed0..f91a9d09f4bb 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java
@@ -37,7 +37,7 @@ public final class SearchContextIdForNode implements Writeable {
}
SearchContextIdForNode(StreamInput in) throws IOException {
- boolean allowNull = in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT);
+ boolean allowNull = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0);
this.node = allowNull ? in.readOptionalString() : in.readString();
this.clusterAlias = in.readOptionalString();
this.searchContextId = allowNull ? in.readOptionalWriteable(ShardSearchContextId::new) : new ShardSearchContextId(in);
@@ -45,7 +45,7 @@ public final class SearchContextIdForNode implements Writeable {
@Override
public void writeTo(StreamOutput out) throws IOException {
- boolean allowNull = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT);
+ boolean allowNull = out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0);
if (allowNull) {
out.writeOptionalString(node);
} else {
@@ -53,7 +53,7 @@ public void writeTo(StreamOutput out) throws IOException {
// We should never set a null node if the cluster is not fully upgraded to a version that can handle it.
throw new IOException(
"Cannot write null node value to a node in version "
- + out.getTransportVersion()
+ + out.getTransportVersion().toReleaseVersion()
+ ". The target node must be specified to retrieve the ShardSearchContextId."
);
}
@@ -67,7 +67,7 @@ public void writeTo(StreamOutput out) throws IOException {
// We should never set a null search context id if the cluster is not fully upgraded to a version that can handle it.
throw new IOException(
"Cannot write null search context ID to a node in version "
- + out.getTransportVersion()
+ + out.getTransportVersion().toReleaseVersion()
+ ". A valid search context ID is required to identify the shard's search context in this version."
);
}
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java
index 9e60eedbad6a..36ca0fba9437 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java
@@ -104,8 +104,7 @@ public TransportOpenPointInTimeAction(
protected void doExecute(Task task, OpenPointInTimeRequest request, ActionListener listener) {
final ClusterState clusterState = clusterService.state();
// Check if all the nodes in this cluster know about the service
- if (request.allowPartialSearchResults()
- && clusterState.getMinTransportVersion().before(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) {
+ if (request.allowPartialSearchResults() && clusterState.getMinTransportVersion().before(TransportVersions.V_8_16_0)) {
listener.onFailure(
new ElasticsearchStatusException(
format(
diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
index 85889d8398cb..ebbd47336e3d 100644
--- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
+++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
@@ -982,12 +982,11 @@ public void writeIndicesOptions(StreamOutput out) throws IOException {
states.add(WildcardStates.HIDDEN);
}
out.writeEnumSet(states);
- if (out.getTransportVersion()
- .between(TransportVersions.V_8_14_0, TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) {
+ if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
out.writeBoolean(includeRegularIndices());
out.writeBoolean(includeFailureIndices());
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
selectorOptions.writeTo(out);
}
}
@@ -1010,8 +1009,7 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti
.ignoreThrottled(options.contains(Option.IGNORE_THROTTLED))
.build();
SelectorOptions selectorOptions = SelectorOptions.DEFAULT;
- if (in.getTransportVersion()
- .between(TransportVersions.V_8_14_0, TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) {
+ if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
// Reading from an older node, which will be sending two booleans that we must read out and ignore.
var includeData = in.readBoolean();
var includeFailures = in.readBoolean();
@@ -1023,7 +1021,7 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti
selectorOptions = SelectorOptions.FAILURES;
}
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
selectorOptions = SelectorOptions.read(in);
}
return new IndicesOptions(
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
index c06ea9305aef..27cbb39c05d3 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
@@ -42,9 +42,7 @@
import org.elasticsearch.nativeaccess.NativeAccess;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;
-import org.elasticsearch.plugins.PluginBundle;
import org.elasticsearch.plugins.PluginsLoader;
-import org.elasticsearch.plugins.PluginsUtils;
import java.io.IOException;
import java.io.InputStream;
@@ -54,10 +52,8 @@
import java.nio.file.Path;
import java.security.Permission;
import java.security.Security;
-import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
-import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
@@ -208,21 +204,17 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException {
// load the plugin Java modules and layers now for use in entitlements
var pluginsLoader = PluginsLoader.createPluginsLoader(nodeEnv.modulesFile(), nodeEnv.pluginsFile());
bootstrap.setPluginsLoader(pluginsLoader);
+ var pluginsResolver = PluginsResolver.create(pluginsLoader);
if (Boolean.parseBoolean(System.getProperty("es.entitlements.enabled"))) {
LogManager.getLogger(Elasticsearch.class).info("Bootstrapping Entitlements");
- List> pluginData = new ArrayList<>();
- Set moduleBundles = PluginsUtils.getModuleBundles(nodeEnv.modulesFile());
- for (PluginBundle moduleBundle : moduleBundles) {
- pluginData.add(Tuple.tuple(moduleBundle.getDir(), moduleBundle.pluginDescriptor().isModular()));
- }
- Set pluginBundles = PluginsUtils.getPluginBundles(nodeEnv.pluginsFile());
- for (PluginBundle pluginBundle : pluginBundles) {
- pluginData.add(Tuple.tuple(pluginBundle.getDir(), pluginBundle.pluginDescriptor().isModular()));
- }
- // TODO: add a functor to map module to plugin name
- EntitlementBootstrap.bootstrap(pluginData, callerClass -> null);
+ List> pluginData = pluginsLoader.allBundles()
+ .stream()
+ .map(bundle -> Tuple.tuple(bundle.getDir(), bundle.pluginDescriptor().isModular()))
+ .toList();
+
+ EntitlementBootstrap.bootstrap(pluginData, pluginsResolver::resolveClassToPluginName);
} else {
// install SM after natives, shutdown hooks, etc.
LogManager.getLogger(Elasticsearch.class).info("Bootstrapping java SecurityManager");
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java b/server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java
new file mode 100644
index 000000000000..256e91cbee16
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.bootstrap;
+
+import org.elasticsearch.plugins.PluginsLoader;
+
+import java.util.HashMap;
+import java.util.Map;
+
+class PluginsResolver {
+ private final Map pluginNameByModule;
+
+ private PluginsResolver(Map pluginNameByModule) {
+ this.pluginNameByModule = pluginNameByModule;
+ }
+
+ public static PluginsResolver create(PluginsLoader pluginsLoader) {
+ Map pluginNameByModule = new HashMap<>();
+
+ pluginsLoader.pluginLayers().forEach(pluginLayer -> {
+ var pluginName = pluginLayer.pluginBundle().pluginDescriptor().getName();
+ if (pluginLayer.pluginModuleLayer() != null && pluginLayer.pluginModuleLayer() != ModuleLayer.boot()) {
+ // This plugin is a Java Module
+ for (var module : pluginLayer.pluginModuleLayer().modules()) {
+ pluginNameByModule.put(module, pluginName);
+ }
+ } else {
+ // This plugin is not modularized
+ pluginNameByModule.put(pluginLayer.pluginClassLoader().getUnnamedModule(), pluginName);
+ }
+ });
+
+ return new PluginsResolver(pluginNameByModule);
+ }
+
+ public String resolveClassToPluginName(Class> clazz) {
+ var module = clazz.getModule();
+ return pluginNameByModule.get(module);
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java
index b6c1defe91a7..9cf567c21966 100644
--- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java
+++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java
@@ -111,7 +111,7 @@ public ClusterIndexHealth(final StreamInput in) throws IOException {
unassignedShards = in.readVInt();
status = ClusterHealthStatus.readFrom(in);
shards = in.readMapValues(ClusterShardHealth::new, ClusterShardHealth::getShardId);
- if (in.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
unassignedPrimaryShards = in.readVInt();
} else {
unassignedPrimaryShards = 0;
@@ -203,7 +203,7 @@ public void writeTo(final StreamOutput out) throws IOException {
out.writeVInt(unassignedShards);
out.writeByte(status.value());
out.writeMapValues(shards);
- if (out.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeVInt(unassignedPrimaryShards);
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java
index 63863542564c..f512acb6e04d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java
+++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java
@@ -96,7 +96,7 @@ public ClusterShardHealth(final StreamInput in) throws IOException {
initializingShards = in.readVInt();
unassignedShards = in.readVInt();
primaryActive = in.readBoolean();
- if (in.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
unassignedPrimaryShards = in.readVInt();
} else {
unassignedPrimaryShards = 0;
@@ -167,7 +167,7 @@ public void writeTo(final StreamOutput out) throws IOException {
out.writeVInt(initializingShards);
out.writeVInt(unassignedShards);
out.writeBoolean(primaryActive);
- if (out.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeVInt(unassignedPrimaryShards);
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java
index 579429b5d51d..31f275e29c36 100644
--- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java
+++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java
@@ -120,7 +120,7 @@ public ClusterStateHealth(final StreamInput in) throws IOException {
status = ClusterHealthStatus.readFrom(in);
indices = in.readMapValues(ClusterIndexHealth::new, ClusterIndexHealth::getIndex);
activeShardsPercent = in.readDouble();
- if (in.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
unassignedPrimaryShards = in.readVInt();
} else {
unassignedPrimaryShards = 0;
@@ -212,7 +212,7 @@ public void writeTo(final StreamOutput out) throws IOException {
out.writeByte(status.value());
out.writeMapValues(indices);
out.writeDouble(activeShardsPercent);
- if (out.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeVInt(unassignedPrimaryShards);
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
index 4dcc7c73c280..979434950cf7 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
@@ -71,6 +71,7 @@ public final class DataStream implements SimpleDiffable, ToXContentO
public static final FeatureFlag FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store");
public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.V_8_12_0;
public static final TransportVersion ADDED_AUTO_SHARDING_EVENT_VERSION = TransportVersions.V_8_14_0;
+ public static final TransportVersion ADD_DATA_STREAM_OPTIONS_VERSION = TransportVersions.V_8_16_0;
public static boolean isFailureStoreFeatureFlagEnabled() {
return FAILURE_STORE_FEATURE_FLAG.isEnabled();
@@ -200,9 +201,7 @@ public static DataStream read(StreamInput in) throws IOException {
: null;
// This boolean flag has been moved in data stream options
var failureStoreEnabled = in.getTransportVersion()
- .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.ADD_DATA_STREAM_OPTIONS)
- ? in.readBoolean()
- : false;
+ .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.V_8_16_0) ? in.readBoolean() : false;
var failureIndices = in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)
? readIndices(in)
: List.of();
@@ -216,7 +215,7 @@ public static DataStream read(StreamInput in) throws IOException {
.setAutoShardingEvent(in.readOptionalWriteable(DataStreamAutoShardingEvent::new));
}
DataStreamOptions dataStreamOptions;
- if (in.getTransportVersion().onOrAfter(TransportVersions.ADD_DATA_STREAM_OPTIONS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
dataStreamOptions = in.readOptionalWriteable(DataStreamOptions::read);
} else {
// We cannot distinguish if failure store was explicitly disabled or not. Given that failure store
@@ -1077,7 +1076,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(lifecycle);
}
if (out.getTransportVersion()
- .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.ADD_DATA_STREAM_OPTIONS)) {
+ .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, DataStream.ADD_DATA_STREAM_OPTIONS_VERSION)) {
out.writeBoolean(isFailureStoreEnabled());
}
if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) {
@@ -1093,7 +1092,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(failureIndices.rolloverOnWrite);
out.writeOptionalWriteable(failureIndices.autoShardingEvent);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_DATA_STREAM_OPTIONS)) {
+ if (out.getTransportVersion().onOrAfter(DataStream.ADD_DATA_STREAM_OPTIONS_VERSION)) {
out.writeOptionalWriteable(dataStreamOptions.isEmpty() ? null : dataStreamOptions);
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java
index 271c60e829a8..8917d5a9cbbb 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java
@@ -9,6 +9,7 @@
package org.elasticsearch.cluster.metadata;
+import org.elasticsearch.TransportVersions;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.SimpleDiffable;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -23,8 +24,6 @@
import java.util.List;
import java.util.Objects;
-import static org.elasticsearch.TransportVersions.SEMANTIC_TEXT_SEARCH_INFERENCE_ID;
-
/**
* Contains inference field data for fields.
* As inference is done in the coordinator node to avoid re-doing it at shard / replica level, the coordinator needs to check for the need
@@ -56,7 +55,7 @@ public InferenceFieldMetadata(String name, String inferenceId, String searchInfe
public InferenceFieldMetadata(StreamInput input) throws IOException {
this.name = input.readString();
this.inferenceId = input.readString();
- if (input.getTransportVersion().onOrAfter(SEMANTIC_TEXT_SEARCH_INFERENCE_ID)) {
+ if (input.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.searchInferenceId = input.readString();
} else {
this.searchInferenceId = this.inferenceId;
@@ -68,7 +67,7 @@ public InferenceFieldMetadata(StreamInput input) throws IOException {
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeString(inferenceId);
- if (out.getTransportVersion().onOrAfter(SEMANTIC_TEXT_SEARCH_INFERENCE_ID)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeString(searchInferenceId);
}
out.writeStringArray(sourceFields);
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
index 790b8e4ab75f..60cf6b10417f 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -317,7 +317,7 @@ public static Diff readDiffFrom(StreamInput in) throws IOException
public static RoutingTable readFrom(StreamInput in) throws IOException {
Builder builder = new Builder();
- if (in.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
+ if (in.getTransportVersion().before(TransportVersions.V_8_16_0)) {
in.readLong(); // previously 'version', unused in all applicable versions so any number will do
}
int size = in.readVInt();
@@ -331,7 +331,7 @@ public static RoutingTable readFrom(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
- if (out.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
+ if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) {
out.writeLong(0); // previously 'version', unused in all applicable versions so any number will do
}
out.writeCollection(indicesRouting.values());
@@ -349,7 +349,7 @@ private static class RoutingTableDiff implements Diff {
new DiffableUtils.DiffableValueReader<>(IndexRoutingTable::readFrom, IndexRoutingTable::readDiffFrom);
RoutingTableDiff(StreamInput in) throws IOException {
- if (in.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
+ if (in.getTransportVersion().before(TransportVersions.V_8_16_0)) {
in.readLong(); // previously 'version', unused in all applicable versions so any number will do
}
indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), DIFF_VALUE_READER);
@@ -366,7 +366,7 @@ public RoutingTable apply(RoutingTable part) {
@Override
public void writeTo(StreamOutput out) throws IOException {
- if (out.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
+ if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) {
out.writeLong(0); // previously 'version', unused in all applicable versions so any number will do
}
indicesRouting.writeTo(out);
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index 644cc6bb6992..e07861ba0543 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -908,11 +908,8 @@ public final Instant readOptionalInstant() throws IOException {
private ZonedDateTime readZonedDateTime() throws IOException {
final String timeZoneId = readString();
final Instant instant;
- if (getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT_BROKEN)) {
- // epoch seconds can be negative, but it was incorrectly first written as vlong
- boolean zlong = getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT);
- long seconds = zlong ? readZLong() : readVLong();
- instant = Instant.ofEpochSecond(seconds, readInt());
+ if (getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
+ instant = Instant.ofEpochSecond(readZLong(), readInt());
} else {
instant = Instant.ofEpochMilli(readLong());
}
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
index d724e5ea25ca..6738af32f04d 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
@@ -768,13 +768,8 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep
final ZonedDateTime zonedDateTime = (ZonedDateTime) v;
o.writeString(zonedDateTime.getZone().getId());
Instant instant = zonedDateTime.toInstant();
- if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT_BROKEN)) {
- // epoch seconds can be negative, but it was incorrectly first written as vlong
- if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) {
- o.writeZLong(instant.getEpochSecond());
- } else {
- o.writeVLong(instant.getEpochSecond());
- }
+ if (o.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
+ o.writeZLong(instant.getEpochSecond());
o.writeInt(instant.getNano());
} else {
o.writeLong(instant.toEpochMilli());
diff --git a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java
index a871524b45e9..520174a4b363 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java
@@ -46,7 +46,7 @@ public CommitStats(SegmentInfos segmentInfos) {
generation = in.readLong();
id = in.readOptionalString();
numDocs = in.readInt();
- numLeaves = in.getTransportVersion().onOrAfter(TransportVersions.SEGMENT_LEVEL_FIELDS_STATS) ? in.readVInt() : 0;
+ numLeaves = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readVInt() : 0;
}
@Override
@@ -100,7 +100,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeLong(generation);
out.writeOptionalString(id);
out.writeInt(numDocs);
- if (out.getTransportVersion().onOrAfter(TransportVersions.SEGMENT_LEVEL_FIELDS_STATS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeVInt(numLeaves);
}
}
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
index e00e7b232000..9ddb6f0d496a 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
@@ -946,7 +946,9 @@ public Query termQuery(Object value, SearchExecutionContext context) {
protected void parseCreateField(DocumentParserContext context) {
// Run-time fields are mapped to this mapper, so it needs to handle storing values for use in synthetic source.
// #parseValue calls this method once the run-time field is created.
- if (context.dynamic() == ObjectMapper.Dynamic.RUNTIME && context.canAddIgnoredField()) {
+ var fieldType = context.mappingLookup().getFieldType(path);
+ boolean isRuntimeField = fieldType instanceof AbstractScriptFieldType;
+ if ((context.dynamic() == ObjectMapper.Dynamic.RUNTIME || isRuntimeField) && context.canAddIgnoredField()) {
try {
context.addIgnoredField(
IgnoredSourceFieldMapper.NameValue.fromContext(context, path, context.encodeFlattenedToken())
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java b/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java
index 56210a292995..10b085654039 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java
@@ -52,7 +52,7 @@ public NodeMappingStats() {
public NodeMappingStats(StreamInput in) throws IOException {
totalCount = in.readVLong();
totalEstimatedOverhead = in.readVLong();
- if (in.getTransportVersion().onOrAfter(TransportVersions.SEGMENT_LEVEL_FIELDS_STATS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
totalSegments = in.readVLong();
totalSegmentFields = in.readVLong();
}
@@ -93,7 +93,7 @@ public long getTotalSegmentFields() {
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(totalCount);
out.writeVLong(totalEstimatedOverhead);
- if (out.getTransportVersion().onOrAfter(TransportVersions.SEGMENT_LEVEL_FIELDS_STATS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeVLong(totalSegments);
out.writeVLong(totalSegmentFields);
}
diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
index 647e45d1beda..6ae0c4872cfa 100644
--- a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
+++ b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
@@ -825,7 +825,7 @@ public String getWriteableName() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.REGEX_AND_RANGE_INTERVAL_QUERIES;
+ return TransportVersions.V_8_16_0;
}
@Override
@@ -1129,7 +1129,7 @@ public String getWriteableName() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.REGEX_AND_RANGE_INTERVAL_QUERIES;
+ return TransportVersions.V_8_16_0;
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java
index 83bca7d27aee..503b2adf756f 100644
--- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java
@@ -321,8 +321,7 @@ public static Query toQuery(
// ToParentBlockJoinQuery requires that the inner query only matches documents
// in its child space
- NestedHelper nestedHelper = new NestedHelper(context.nestedLookup(), context::isFieldMapped);
- if (nestedHelper.mightMatchNonNestedDocs(innerQuery, path)) {
+ if (NestedHelper.mightMatchNonNestedDocs(innerQuery, path, context)) {
innerQuery = Queries.filtered(innerQuery, mapper.nestedTypeFilter());
}
diff --git a/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java
index 33077697a2ce..889fa40b79aa 100644
--- a/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java
@@ -25,8 +25,6 @@
import java.util.Map;
import java.util.Objects;
-import static org.elasticsearch.TransportVersions.RRF_QUERY_REWRITE;
-
public class RankDocsQueryBuilder extends AbstractQueryBuilder {
public static final String NAME = "rank_docs_query";
@@ -44,7 +42,7 @@ public RankDocsQueryBuilder(RankDoc[] rankDocs, QueryBuilder[] queryBuilders, bo
public RankDocsQueryBuilder(StreamInput in) throws IOException {
super(in);
this.rankDocs = in.readArray(c -> c.readNamedWriteable(RankDoc.class), RankDoc[]::new);
- if (in.getTransportVersion().onOrAfter(RRF_QUERY_REWRITE)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.queryBuilders = in.readOptionalArray(c -> c.readNamedWriteable(QueryBuilder.class), QueryBuilder[]::new);
this.onlyRankDocs = in.readBoolean();
} else {
@@ -85,7 +83,7 @@ public RankDoc[] rankDocs() {
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeArray(StreamOutput::writeNamedWriteable, rankDocs);
- if (out.getTransportVersion().onOrAfter(RRF_QUERY_REWRITE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalArray(StreamOutput::writeNamedWriteable, queryBuilders);
out.writeBoolean(onlyRankDocs);
}
@@ -145,6 +143,6 @@ protected int doHashCode() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.RANK_DOCS_RETRIEVER;
+ return TransportVersions.V_8_16_0;
}
}
diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java
index b07112440d3c..d5e48a6a54da 100644
--- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java
+++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java
@@ -493,14 +493,18 @@ public boolean containsBrokenAnalysis(String field) {
*/
public SearchLookup lookup() {
if (this.lookup == null) {
- SourceProvider sourceProvider = isSourceSynthetic()
- ? SourceProvider.fromSyntheticSource(mappingLookup.getMapping(), mapperMetrics.sourceFieldMetrics())
- : SourceProvider.fromStoredFields();
+ var sourceProvider = createSourceProvider();
setLookupProviders(sourceProvider, LeafFieldLookupProvider.fromStoredFields());
}
return this.lookup;
}
+ public SourceProvider createSourceProvider() {
+ return isSourceSynthetic()
+ ? SourceProvider.fromSyntheticSource(mappingLookup.getMapping(), mapperMetrics.sourceFieldMetrics())
+ : SourceProvider.fromStoredFields();
+ }
+
/**
* Replace the standard source provider and field lookup provider on the SearchLookup
*
diff --git a/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java b/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java
index 96e8ac35c8e3..a04f930e052b 100644
--- a/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java
+++ b/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java
@@ -21,29 +21,21 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery;
-import org.elasticsearch.index.mapper.NestedLookup;
import org.elasticsearch.index.mapper.NestedObjectMapper;
-
-import java.util.function.Predicate;
+import org.elasticsearch.index.query.SearchExecutionContext;
/** Utility class to filter parent and children clauses when building nested
* queries. */
public final class NestedHelper {
- private final NestedLookup nestedLookup;
- private final Predicate isMappedFieldPredicate;
-
- public NestedHelper(NestedLookup nestedLookup, Predicate isMappedFieldPredicate) {
- this.nestedLookup = nestedLookup;
- this.isMappedFieldPredicate = isMappedFieldPredicate;
- }
+ private NestedHelper() {}
/** Returns true if the given query might match nested documents. */
- public boolean mightMatchNestedDocs(Query query) {
+ public static boolean mightMatchNestedDocs(Query query, SearchExecutionContext searchExecutionContext) {
if (query instanceof ConstantScoreQuery) {
- return mightMatchNestedDocs(((ConstantScoreQuery) query).getQuery());
+ return mightMatchNestedDocs(((ConstantScoreQuery) query).getQuery(), searchExecutionContext);
} else if (query instanceof BoostQuery) {
- return mightMatchNestedDocs(((BoostQuery) query).getQuery());
+ return mightMatchNestedDocs(((BoostQuery) query).getQuery(), searchExecutionContext);
} else if (query instanceof MatchAllDocsQuery) {
return true;
} else if (query instanceof MatchNoDocsQuery) {
@@ -51,17 +43,17 @@ public boolean mightMatchNestedDocs(Query query) {
} else if (query instanceof TermQuery) {
// We only handle term(s) queries and range queries, which should already
// cover a high majority of use-cases
- return mightMatchNestedDocs(((TermQuery) query).getTerm().field());
+ return mightMatchNestedDocs(((TermQuery) query).getTerm().field(), searchExecutionContext);
} else if (query instanceof TermInSetQuery tis) {
if (tis.getTermsCount() > 0) {
- return mightMatchNestedDocs(tis.getField());
+ return mightMatchNestedDocs(tis.getField(), searchExecutionContext);
} else {
return false;
}
} else if (query instanceof PointRangeQuery) {
- return mightMatchNestedDocs(((PointRangeQuery) query).getField());
+ return mightMatchNestedDocs(((PointRangeQuery) query).getField(), searchExecutionContext);
} else if (query instanceof IndexOrDocValuesQuery) {
- return mightMatchNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery());
+ return mightMatchNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery(), searchExecutionContext);
} else if (query instanceof final BooleanQuery bq) {
final boolean hasRequiredClauses = bq.clauses().stream().anyMatch(BooleanClause::isRequired);
if (hasRequiredClauses) {
@@ -69,13 +61,13 @@ public boolean mightMatchNestedDocs(Query query) {
.stream()
.filter(BooleanClause::isRequired)
.map(BooleanClause::query)
- .allMatch(this::mightMatchNestedDocs);
+ .allMatch(f -> mightMatchNestedDocs(f, searchExecutionContext));
} else {
return bq.clauses()
.stream()
.filter(c -> c.occur() == Occur.SHOULD)
.map(BooleanClause::query)
- .anyMatch(this::mightMatchNestedDocs);
+ .anyMatch(f -> mightMatchNestedDocs(f, searchExecutionContext));
}
} else if (query instanceof ESToParentBlockJoinQuery) {
return ((ESToParentBlockJoinQuery) query).getPath() != null;
@@ -85,7 +77,7 @@ public boolean mightMatchNestedDocs(Query query) {
}
/** Returns true if a query on the given field might match nested documents. */
- boolean mightMatchNestedDocs(String field) {
+ private static boolean mightMatchNestedDocs(String field, SearchExecutionContext searchExecutionContext) {
if (field.startsWith("_")) {
// meta field. Every meta field behaves differently, eg. nested
// documents have the same _uid as their parent, put their path in
@@ -94,36 +86,36 @@ boolean mightMatchNestedDocs(String field) {
// we might add a nested filter when it is nor required.
return true;
}
- if (isMappedFieldPredicate.test(field) == false) {
+ if (searchExecutionContext.isFieldMapped(field) == false) {
// field does not exist
return false;
}
- return nestedLookup.getNestedParent(field) != null;
+ return searchExecutionContext.nestedLookup().getNestedParent(field) != null;
}
/** Returns true if the given query might match parent documents or documents
* that are nested under a different path. */
- public boolean mightMatchNonNestedDocs(Query query, String nestedPath) {
+ public static boolean mightMatchNonNestedDocs(Query query, String nestedPath, SearchExecutionContext searchExecutionContext) {
if (query instanceof ConstantScoreQuery) {
- return mightMatchNonNestedDocs(((ConstantScoreQuery) query).getQuery(), nestedPath);
+ return mightMatchNonNestedDocs(((ConstantScoreQuery) query).getQuery(), nestedPath, searchExecutionContext);
} else if (query instanceof BoostQuery) {
- return mightMatchNonNestedDocs(((BoostQuery) query).getQuery(), nestedPath);
+ return mightMatchNonNestedDocs(((BoostQuery) query).getQuery(), nestedPath, searchExecutionContext);
} else if (query instanceof MatchAllDocsQuery) {
return true;
} else if (query instanceof MatchNoDocsQuery) {
return false;
} else if (query instanceof TermQuery) {
- return mightMatchNonNestedDocs(((TermQuery) query).getTerm().field(), nestedPath);
+ return mightMatchNonNestedDocs(searchExecutionContext, ((TermQuery) query).getTerm().field(), nestedPath);
} else if (query instanceof TermInSetQuery tis) {
if (tis.getTermsCount() > 0) {
- return mightMatchNonNestedDocs(tis.getField(), nestedPath);
+ return mightMatchNonNestedDocs(searchExecutionContext, tis.getField(), nestedPath);
} else {
return false;
}
} else if (query instanceof PointRangeQuery) {
- return mightMatchNonNestedDocs(((PointRangeQuery) query).getField(), nestedPath);
+ return mightMatchNonNestedDocs(searchExecutionContext, ((PointRangeQuery) query).getField(), nestedPath);
} else if (query instanceof IndexOrDocValuesQuery) {
- return mightMatchNonNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery(), nestedPath);
+ return mightMatchNonNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery(), nestedPath, searchExecutionContext);
} else if (query instanceof final BooleanQuery bq) {
final boolean hasRequiredClauses = bq.clauses().stream().anyMatch(BooleanClause::isRequired);
if (hasRequiredClauses) {
@@ -131,13 +123,13 @@ public boolean mightMatchNonNestedDocs(Query query, String nestedPath) {
.stream()
.filter(BooleanClause::isRequired)
.map(BooleanClause::query)
- .allMatch(q -> mightMatchNonNestedDocs(q, nestedPath));
+ .allMatch(q -> mightMatchNonNestedDocs(q, nestedPath, searchExecutionContext));
} else {
return bq.clauses()
.stream()
.filter(c -> c.occur() == Occur.SHOULD)
.map(BooleanClause::query)
- .anyMatch(q -> mightMatchNonNestedDocs(q, nestedPath));
+ .anyMatch(q -> mightMatchNonNestedDocs(q, nestedPath, searchExecutionContext));
}
} else {
return true;
@@ -146,7 +138,7 @@ public boolean mightMatchNonNestedDocs(Query query, String nestedPath) {
/** Returns true if a query on the given field might match parent documents
* or documents that are nested under a different path. */
- boolean mightMatchNonNestedDocs(String field, String nestedPath) {
+ private static boolean mightMatchNonNestedDocs(SearchExecutionContext searchExecutionContext, String field, String nestedPath) {
if (field.startsWith("_")) {
// meta field. Every meta field behaves differently, eg. nested
// documents have the same _uid as their parent, put their path in
@@ -155,9 +147,10 @@ boolean mightMatchNonNestedDocs(String field, String nestedPath) {
// we might add a nested filter when it is nor required.
return true;
}
- if (isMappedFieldPredicate.test(field) == false) {
+ if (searchExecutionContext.isFieldMapped(field) == false) {
return false;
}
+ var nestedLookup = searchExecutionContext.nestedLookup();
String nestedParent = nestedLookup.getNestedParent(field);
if (nestedParent == null || nestedParent.startsWith(nestedPath) == false) {
// the field is not a sub field of the nested path
diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java b/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java
index ff514091979c..8b19d72ccc09 100644
--- a/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java
+++ b/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java
@@ -105,7 +105,7 @@ private Stats(StreamInput in) throws IOException {
suggestTimeInMillis = in.readVLong();
suggestCurrent = in.readVLong();
- if (in.getTransportVersion().onOrAfter(TransportVersions.SEARCH_FAILURE_STATS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
queryFailure = in.readVLong();
fetchFailure = in.readVLong();
}
@@ -129,7 +129,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(suggestTimeInMillis);
out.writeVLong(suggestCurrent);
- if (out.getTransportVersion().onOrAfter(TransportVersions.SEARCH_FAILURE_STATS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeVLong(queryFailure);
out.writeVLong(fetchFailure);
}
diff --git a/server/src/main/java/org/elasticsearch/inference/EmptySecretSettings.java b/server/src/main/java/org/elasticsearch/inference/EmptySecretSettings.java
index 9c666bd4a35f..ee38273f13da 100644
--- a/server/src/main/java/org/elasticsearch/inference/EmptySecretSettings.java
+++ b/server/src/main/java/org/elasticsearch/inference/EmptySecretSettings.java
@@ -44,7 +44,7 @@ public String getWriteableName() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.ML_INFERENCE_EIS_INTEGRATION_ADDED;
+ return TransportVersions.V_8_16_0;
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java
index ebf32f041155..53ce0bab6361 100644
--- a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java
+++ b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java
@@ -121,7 +121,7 @@ public ModelConfigurations(StreamInput in) throws IOException {
this.service = in.readString();
this.serviceSettings = in.readNamedWriteable(ServiceSettings.class);
this.taskSettings = in.readNamedWriteable(TaskSettings.class);
- this.chunkingSettings = in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_CHUNKING_SETTINGS)
+ this.chunkingSettings = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)
? in.readOptionalNamedWriteable(ChunkingSettings.class)
: null;
}
@@ -133,7 +133,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeString(service);
out.writeNamedWriteable(serviceSettings);
out.writeNamedWriteable(taskSettings);
- if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_CHUNKING_SETTINGS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalNamedWriteable(chunkingSettings);
}
}
diff --git a/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java
index e696c38b9f01..ff6a687da9b4 100644
--- a/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java
+++ b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java
@@ -64,7 +64,7 @@ public String getWriteableName() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+ return TransportVersions.V_8_16_0;
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java b/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java
index aa21e5c64d90..aadda93f977b 100644
--- a/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java
+++ b/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java
@@ -50,7 +50,6 @@
* to have all the plugin information they need prior to starting.
*/
public class PluginsLoader {
-
/**
* Contains information about the {@link ClassLoader} required to load a plugin
*/
@@ -64,18 +63,26 @@ public interface PluginLayer {
* @return The {@link ClassLoader} used to instantiate the main class for the plugin
*/
ClassLoader pluginClassLoader();
+
+ /**
+ * @return The {@link ModuleLayer} for the plugin modules
+ */
+ ModuleLayer pluginModuleLayer();
}
/**
* Contains information about the {@link ClassLoader}s and {@link ModuleLayer} required for loading a plugin
- * @param pluginBundle Information about the bundle of jars used in this plugin
+ *
+ * @param pluginBundle Information about the bundle of jars used in this plugin
* @param pluginClassLoader The {@link ClassLoader} used to instantiate the main class for the plugin
- * @param spiClassLoader The exported {@link ClassLoader} visible to other Java modules
- * @param spiModuleLayer The exported {@link ModuleLayer} visible to other Java modules
+ * @param pluginModuleLayer The {@link ModuleLayer} containing the Java modules of the plugin
+ * @param spiClassLoader The exported {@link ClassLoader} visible to other Java modules
+ * @param spiModuleLayer The exported {@link ModuleLayer} visible to other Java modules
*/
private record LoadedPluginLayer(
PluginBundle pluginBundle,
ClassLoader pluginClassLoader,
+ ModuleLayer pluginModuleLayer,
ClassLoader spiClassLoader,
ModuleLayer spiModuleLayer
) implements PluginLayer {
@@ -103,6 +110,10 @@ public record LayerAndLoader(ModuleLayer layer, ClassLoader loader) {
public static LayerAndLoader ofLoader(ClassLoader loader) {
return new LayerAndLoader(ModuleLayer.boot(), loader);
}
+
+ public static LayerAndLoader ofUberModuleLoader(UberModuleClassLoader loader) {
+ return new LayerAndLoader(loader.getLayer(), loader);
+ }
}
private static final Logger logger = LogManager.getLogger(PluginsLoader.class);
@@ -111,6 +122,7 @@ public static LayerAndLoader ofLoader(ClassLoader loader) {
private final List moduleDescriptors;
private final List pluginDescriptors;
private final Map loadedPluginLayers;
+ private final Set allBundles;
/**
* Constructs a new PluginsLoader
@@ -185,17 +197,19 @@ public static PluginsLoader createPluginsLoader(Path modulesDirectory, Path plug
}
}
- return new PluginsLoader(moduleDescriptors, pluginDescriptors, loadedPluginLayers);
+ return new PluginsLoader(moduleDescriptors, pluginDescriptors, loadedPluginLayers, Set.copyOf(seenBundles));
}
PluginsLoader(
List moduleDescriptors,
List pluginDescriptors,
- Map loadedPluginLayers
+ Map loadedPluginLayers,
+ Set allBundles
) {
this.moduleDescriptors = moduleDescriptors;
this.pluginDescriptors = pluginDescriptors;
this.loadedPluginLayers = loadedPluginLayers;
+ this.allBundles = allBundles;
}
public List moduleDescriptors() {
@@ -210,6 +224,10 @@ public Stream pluginLayers() {
return loadedPluginLayers.values().stream().map(Function.identity());
}
+ public Set allBundles() {
+ return allBundles;
+ }
+
private static void loadPluginLayer(
PluginBundle bundle,
Map loaded,
@@ -239,7 +257,7 @@ private static void loadPluginLayer(
}
final ClassLoader pluginParentLoader = spiLayerAndLoader == null ? parentLoader : spiLayerAndLoader.loader();
- final LayerAndLoader pluginLayerAndLoader = createPlugin(
+ final LayerAndLoader pluginLayerAndLoader = createPluginLayerAndLoader(
bundle,
pluginParentLoader,
extendedPlugins,
@@ -253,7 +271,16 @@ private static void loadPluginLayer(
spiLayerAndLoader = pluginLayerAndLoader;
}
- loaded.put(name, new LoadedPluginLayer(bundle, pluginClassLoader, spiLayerAndLoader.loader, spiLayerAndLoader.layer));
+ loaded.put(
+ name,
+ new LoadedPluginLayer(
+ bundle,
+ pluginClassLoader,
+ pluginLayerAndLoader.layer(),
+ spiLayerAndLoader.loader,
+ spiLayerAndLoader.layer
+ )
+ );
}
static LayerAndLoader createSPI(
@@ -277,7 +304,7 @@ static LayerAndLoader createSPI(
}
}
- static LayerAndLoader createPlugin(
+ private static LayerAndLoader createPluginLayerAndLoader(
PluginBundle bundle,
ClassLoader pluginParentLoader,
List extendedPlugins,
@@ -294,7 +321,7 @@ static LayerAndLoader createPlugin(
return createPluginModuleLayer(bundle, pluginParentLoader, parentLayers, qualifiedExports);
} else if (plugin.isStable()) {
logger.debug(() -> "Loading bundle: " + plugin.getName() + ", non-modular as synthetic module");
- return LayerAndLoader.ofLoader(
+ return LayerAndLoader.ofUberModuleLoader(
UberModuleClassLoader.getInstance(
pluginParentLoader,
ModuleLayer.boot(),
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
index a09fcbd0c527..7e4d23db7028 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
@@ -15,7 +15,6 @@
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
@@ -40,9 +39,6 @@
*/
@ServerlessScope(Scope.PUBLIC)
public class RestGetSourceAction extends BaseRestHandler {
- private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetSourceAction.class);
- static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in get_source and exist_source "
- + "requests is deprecated.";
@Override
public List routes() {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java
index 65aa1869a41e..9d39bf7f343c 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java
@@ -13,7 +13,6 @@
import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.Scope;
@@ -28,8 +27,6 @@
@ServerlessScope(Scope.PUBLIC)
public class RestMultiTermVectorsAction extends BaseRestHandler {
- private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestMultiTermVectorsAction.class);
- static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in multi term vector requests is deprecated.";
@Override
public List routes() {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java
index c1a55874bfc5..b0e08b376f9d 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java
@@ -14,7 +14,6 @@
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
@@ -36,8 +35,6 @@
@ServerlessScope(Scope.PUBLIC)
public class RestCountAction extends BaseRestHandler {
- private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestCountAction.class);
- static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in count requests is deprecated.";
@Override
public List routes() {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index ff062084a3cb..a9c2ff7576b0 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -16,7 +16,6 @@
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.core.Booleans;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.features.NodeFeature;
@@ -56,8 +55,6 @@
@ServerlessScope(Scope.PUBLIC)
public class RestSearchAction extends BaseRestHandler {
- private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSearchAction.class);
- public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in search requests is deprecated.";
/**
* Indicates whether hits.total should be rendered as an integer or an object
diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
index 8ac35f7c40ca..b87d097413b6 100644
--- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
+++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
@@ -444,10 +444,9 @@ public void preProcess() {
public Query buildFilteredQuery(Query query) {
List filters = new ArrayList<>();
NestedLookup nestedLookup = searchExecutionContext.nestedLookup();
- NestedHelper nestedHelper = new NestedHelper(nestedLookup, searchExecutionContext::isFieldMapped);
if (nestedLookup != NestedLookup.EMPTY
- && nestedHelper.mightMatchNestedDocs(query)
- && (aliasFilter == null || nestedHelper.mightMatchNestedDocs(aliasFilter))) {
+ && NestedHelper.mightMatchNestedDocs(query, searchExecutionContext)
+ && (aliasFilter == null || NestedHelper.mightMatchNestedDocs(aliasFilter, searchExecutionContext))) {
filters.add(Queries.newNonNestedFilter(searchExecutionContext.indexVersionCreated()));
}
diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java
index a1e8eb25f478..f8d161ef1f5e 100644
--- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java
+++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java
@@ -263,7 +263,7 @@ private DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resol
private DateTime(StreamInput in) throws IOException {
String formatterPattern = in.readString();
- Locale locale = in.getTransportVersion().onOrAfter(TransportVersions.DATE_TIME_DOC_VALUES_LOCALES)
+ Locale locale = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)
? LocaleUtils.parse(in.readString())
: DateFieldMapper.DEFAULT_LOCALE;
String zoneId = in.readString();
@@ -297,7 +297,7 @@ public static DateTime readFrom(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(formatter.pattern());
- if (out.getTransportVersion().onOrAfter(TransportVersions.DATE_TIME_DOC_VALUES_LOCALES)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeString(formatter.locale().toString());
}
out.writeString(timeZone.getId());
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java
index 2d360705f75b..c412ecb5d636 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java
@@ -12,6 +12,7 @@
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd;
import org.elasticsearch.search.aggregations.support.AggregationPath;
import org.elasticsearch.xcontent.ToXContentObject;
@@ -20,13 +21,12 @@
import java.util.Comparator;
import java.util.List;
import java.util.function.BiFunction;
-import java.util.function.ToLongFunction;
/**
* {@link Bucket} ordering strategy. Buckets can be order either as
* "complete" buckets using {@link #comparator()} or against a combination
* of the buckets internals with its ordinal with
- * {@link #partiallyBuiltBucketComparator(ToLongFunction, Aggregator)}.
+ * {@link #partiallyBuiltBucketComparator(Aggregator)}.
*/
public abstract class BucketOrder implements ToXContentObject, Writeable {
/**
@@ -102,7 +102,7 @@ public final void validate(Aggregator aggregator) throws AggregationExecutionExc
* to validate this order because doing so checks all of the appropriate
* paths.
*/
- partiallyBuiltBucketComparator(null, aggregator);
+ partiallyBuiltBucketComparator(aggregator);
}
/**
@@ -121,7 +121,7 @@ public final void validate(Aggregator aggregator) throws AggregationExecutionExc
* with it all the time.
*
*/
- public abstract Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator);
+ public abstract Comparator> partiallyBuiltBucketComparator(Aggregator aggregator);
/**
* Build a comparator for fully built buckets.
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java
index b2ca4a10dc4b..3593eb5adf7e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java
@@ -15,6 +15,7 @@
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.search.aggregations.Aggregator.BucketComparator;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd;
import org.elasticsearch.search.aggregations.support.AggregationPath;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.sort.SortValue;
@@ -30,7 +31,6 @@
import java.util.List;
import java.util.Objects;
import java.util.function.BiFunction;
-import java.util.function.ToLongFunction;
/**
* Implementations for {@link Bucket} ordering strategies.
@@ -63,10 +63,10 @@ public AggregationPath path() {
}
@Override
- public Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator) {
+ public Comparator> partiallyBuiltBucketComparator(Aggregator aggregator) {
try {
BucketComparator bucketComparator = path.bucketComparator(aggregator, order);
- return (lhs, rhs) -> bucketComparator.compare(ordinalReader.applyAsLong(lhs), ordinalReader.applyAsLong(rhs));
+ return (lhs, rhs) -> bucketComparator.compare(lhs.ord, rhs.ord);
} catch (IllegalArgumentException e) {
throw new AggregationExecutionException.InvalidPath("Invalid aggregation order path [" + path + "]. " + e.getMessage(), e);
}
@@ -188,12 +188,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
@Override
- public Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator) {
- List> comparators = orderElements.stream()
- .map(oe -> oe.partiallyBuiltBucketComparator(ordinalReader, aggregator))
- .toList();
+ public Comparator> partiallyBuiltBucketComparator(Aggregator aggregator) {
+ List>> comparators = new ArrayList<>(orderElements.size());
+ for (BucketOrder order : orderElements) {
+ comparators.add(order.partiallyBuiltBucketComparator(aggregator));
+ }
return (lhs, rhs) -> {
- for (Comparator c : comparators) {
+ for (Comparator> c : comparators) {
int result = c.compare(lhs, rhs);
if (result != 0) {
return result;
@@ -299,9 +300,9 @@ byte id() {
}
@Override
- public Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator) {
+ public Comparator> partiallyBuiltBucketComparator(Aggregator aggregator) {
Comparator comparator = comparator();
- return comparator::compare;
+ return (lhs, rhs) -> comparator.compare(lhs.bucket, rhs.bucket);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
index 344b90b06c4f..571ce3a9a451 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
@@ -13,6 +13,7 @@
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.core.Releasables;
@@ -26,6 +27,7 @@
import org.elasticsearch.search.aggregations.InternalOrder;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
+import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd;
import org.elasticsearch.search.aggregations.bucket.terms.BucketPriorityQueue;
import org.elasticsearch.search.aggregations.bucket.terms.BytesKeyedBucketOrds;
import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms;
@@ -38,7 +40,6 @@
import java.util.Arrays;
import java.util.Map;
import java.util.function.BiConsumer;
-import java.util.function.Supplier;
import static java.util.Collections.emptyList;
import static org.elasticsearch.search.aggregations.InternalOrder.isKeyOrder;
@@ -115,51 +116,57 @@ public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throw
LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size());
ObjectArray topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size())
) {
- for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
- int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());
-
- // as users can't control sort order, in practice we'll always sort by doc count descending
- try (
- BucketPriorityQueue ordered = new BucketPriorityQueue<>(
- size,
- bigArrays(),
- partiallyBuiltBucketComparator
- )
- ) {
- StringTerms.Bucket spare = null;
- BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx));
- Supplier emptyBucketBuilder = () -> new StringTerms.Bucket(
- new BytesRef(),
- 0,
- null,
- false,
- 0,
- format
- );
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCounts.increment(ordIdx, docCount);
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = emptyBucketBuilder.get();
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ // find how many buckets we are going to collect
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds.get(ordIdx)), bucketCountThresholds.getShardSize());
+ bucketsToCollect.set(ordIdx, size);
+ ordsToCollect += size;
+ }
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ // as users can't control sort order, in practice we'll always sort by doc count descending
+ try (
+ BucketPriorityQueue ordered = new BucketPriorityQueue<>(
+ bucketsToCollect.get(ordIdx),
+ bigArrays(),
+ order.partiallyBuiltBucketComparator(this)
+ )
+ ) {
+ BucketAndOrd spare = null;
+ BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx));
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCounts.increment(ordIdx, docCount);
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(new StringTerms.Bucket(new BytesRef(), 0, null, false, 0, format));
+ }
+ ordsEnum.readValue(spare.bucket.getTermBytes());
+ spare.bucket.setDocCount(docCount);
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
+ final int orderedSize = (int) ordered.size();
+ final StringTerms.Bucket[] buckets = new StringTerms.Bucket[orderedSize];
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ BucketAndOrd bucketAndOrd = ordered.pop();
+ buckets[i] = bucketAndOrd.bucket;
+ ordsArray.set(ordsCollected + i, bucketAndOrd.ord);
+ otherDocCounts.increment(ordIdx, -bucketAndOrd.bucket.getDocCount());
+ bucketAndOrd.bucket.setTermBytes(BytesRef.deepCopyOf(bucketAndOrd.bucket.getTermBytes()));
+ }
+ topBucketsPerOrd.set(ordIdx, buckets);
+ ordsCollected += orderedSize;
}
- ordsEnum.readValue(spare.getTermBytes());
- spare.setDocCount(docCount);
- spare.setBucketOrd(ordsEnum.ord());
- spare = ordered.insertWithOverflow(spare);
- }
-
- topBucketsPerOrd.set(ordIdx, new StringTerms.Bucket[(int) ordered.size()]);
- for (int i = (int) ordered.size() - 1; i >= 0; --i) {
- topBucketsPerOrd.get(ordIdx)[i] = ordered.pop();
- otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[i].getDocCount());
- topBucketsPerOrd.get(ordIdx)[i].setTermBytes(BytesRef.deepCopyOf(topBucketsPerOrd.get(ordIdx)[i].getTermBytes()));
}
+ assert ordsCollected == ordsArray.size();
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, InternalTerms.Bucket::setAggregations);
}
}
- buildSubAggsForAllBuckets(topBucketsPerOrd, InternalTerms.Bucket::getBucketOrd, InternalTerms.Bucket::setAggregations);
-
return buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> {
final BucketOrder reduceOrder;
if (isKeyOrder(order) == false) {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java
index 7f8e5c8c885f..9550003a5bd1 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java
@@ -13,17 +13,17 @@
import java.util.Comparator;
-public class BucketPriorityQueue<B> extends ObjectArrayPriorityQueue<B> {
+public class BucketPriorityQueue<B> extends ObjectArrayPriorityQueue<BucketAndOrd<B>> {
- private final Comparator<? super B> comparator;
+ private final Comparator<BucketAndOrd<B>> comparator;
- public BucketPriorityQueue(int size, BigArrays bigArrays, Comparator<? super B> comparator) {
+ public BucketPriorityQueue(int size, BigArrays bigArrays, Comparator<BucketAndOrd<B>> comparator) {
super(size, bigArrays);
this.comparator = comparator;
}
@Override
- protected boolean lessThan(B a, B b) {
+ protected boolean lessThan(BucketAndOrd<B> a, BucketAndOrd<B> b) {
return comparator.compare(a, b) > 0; // reverse, since we reverse again when adding to a list
}
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java
index fe751c9e7918..4736f52d9362 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java
@@ -12,14 +12,14 @@
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
-public class BucketSignificancePriorityQueue<B extends SignificantTerms.Bucket> extends ObjectArrayPriorityQueue<B> {
+public class BucketSignificancePriorityQueue<B extends SignificantTerms.Bucket> extends ObjectArrayPriorityQueue<BucketAndOrd<B>> {
public BucketSignificancePriorityQueue(int size, BigArrays bigArrays) {
super(size, bigArrays);
}
@Override
- protected boolean lessThan(SignificantTerms.Bucket o1, SignificantTerms.Bucket o2) {
- return o1.getSignificanceScore() < o2.getSignificanceScore();
+ protected boolean lessThan(BucketAndOrd<B> o1, BucketAndOrd<B> o2) {
+ return o1.bucket.getSignificanceScore() < o2.bucket.getSignificanceScore();
}
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
index 0ec03a6f56dd..439b61cc43dd 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
@@ -20,6 +20,7 @@
import org.apache.lucene.util.PriorityQueue;
import org.elasticsearch.common.CheckedSupplier;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.common.util.ObjectArray;
@@ -561,10 +562,10 @@ InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOExc
) {
GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd;
final int size = (int) Math.min(valueCount, bucketCountThresholds.getShardSize());
- try (ObjectArrayPriorityQueue<TB> ordered = collectionStrategy.buildPriorityQueue(size)) {
+ try (ObjectArrayPriorityQueue<BucketAndOrd<TB>> ordered = collectionStrategy.buildPriorityQueue(size)) {
BucketUpdater updater = collectionStrategy.bucketUpdater(0, lookupGlobalOrd);
collect(new BucketInfoConsumer() {
- TB spare = null;
+ BucketAndOrd spare = null;
@Override
public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException {
@@ -572,24 +573,31 @@ public void accept(long globalOrd, long bucketOrd, long docCount) throws IOExcep
if (docCount >= bucketCountThresholds.getShardMinDocCount()) {
if (spare == null) {
checkRealMemoryCBForInternalBucket();
- spare = collectionStrategy.buildEmptyTemporaryBucket();
+ spare = new BucketAndOrd<>(collectionStrategy.buildEmptyTemporaryBucket());
}
- updater.updateBucket(spare, globalOrd, bucketOrd, docCount);
+ spare.ord = bucketOrd;
+ updater.updateBucket(spare.bucket, globalOrd, docCount);
spare = ordered.insertWithOverflow(spare);
}
}
});
// Get the top buckets
- topBucketsPreOrd.set(0, collectionStrategy.buildBuckets((int) ordered.size()));
- for (int i = (int) ordered.size() - 1; i >= 0; --i) {
- checkRealMemoryCBForInternalBucket();
- B bucket = collectionStrategy.convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd);
- topBucketsPreOrd.get(0)[i] = bucket;
- otherDocCount.increment(0, -bucket.getDocCount());
+ int orderedSize = (int) ordered.size();
+ try (LongArray ordsArray = bigArrays().newLongArray(orderedSize)) {
+ B[] buckets = collectionStrategy.buildBuckets(orderedSize);
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ checkRealMemoryCBForInternalBucket();
+ BucketAndOrd bucketAndOrd = ordered.pop();
+ B bucket = collectionStrategy.convertTempBucketToRealBucket(bucketAndOrd.bucket, lookupGlobalOrd);
+ ordsArray.set(i, bucketAndOrd.ord);
+ buckets[i] = bucket;
+ otherDocCount.increment(0, -bucket.getDocCount());
+ }
+ topBucketsPreOrd.set(0, buckets);
+ collectionStrategy.buildSubAggs(topBucketsPreOrd, ordsArray);
}
}
- collectionStrategy.buildSubAggs(topBucketsPreOrd);
return GlobalOrdinalsStringTermsAggregator.this.buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> collectionStrategy.buildResult(
@@ -710,39 +718,61 @@ InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOExc
LongArray otherDocCount = bigArrays().newLongArray(owningBucketOrds.size(), true);
ObjectArray topBucketsPreOrd = collectionStrategy.buildTopBucketsPerOrd(owningBucketOrds.size())
) {
- GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd;
- for (long ordIdx = 0; ordIdx < topBucketsPreOrd.size(); ordIdx++) {
- long owningBucketOrd = owningBucketOrds.get(ordIdx);
- collectZeroDocEntriesIfNeeded(owningBucketOrds.get(ordIdx));
- int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize());
- try (ObjectArrayPriorityQueue ordered = collectionStrategy.buildPriorityQueue(size)) {
- BucketUpdater updater = collectionStrategy.bucketUpdater(owningBucketOrd, lookupGlobalOrd);
- LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
- TB spare = null;
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCount.increment(ordIdx, docCount);
- if (docCount < bucketCountThresholds.getShardMinDocCount()) {
- continue;
- }
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = collectionStrategy.buildEmptyTemporaryBucket();
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ collectZeroDocEntriesIfNeeded(owningBucketOrd);
+ final int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize());
+ ordsToCollect += size;
+ bucketsToCollect.set(ordIdx, size);
+ }
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd;
+ for (long ordIdx = 0; ordIdx < topBucketsPreOrd.size(); ordIdx++) {
+ long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ try (
+ ObjectArrayPriorityQueue> ordered = collectionStrategy.buildPriorityQueue(
+ bucketsToCollect.get(ordIdx)
+ )
+ ) {
+ BucketUpdater updater = collectionStrategy.bucketUpdater(owningBucketOrd, lookupGlobalOrd);
+ LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
+ BucketAndOrd spare = null;
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCount.increment(ordIdx, docCount);
+ if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+ continue;
+ }
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(collectionStrategy.buildEmptyTemporaryBucket());
+ }
+ updater.updateBucket(spare.bucket, ordsEnum.value(), docCount);
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
+ // Get the top buckets
+ int orderedSize = (int) ordered.size();
+ B[] buckets = collectionStrategy.buildBuckets(orderedSize);
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ checkRealMemoryCBForInternalBucket();
+ BucketAndOrd bucketAndOrd = ordered.pop();
+ B bucket = collectionStrategy.convertTempBucketToRealBucket(bucketAndOrd.bucket, lookupGlobalOrd);
+ ordsArray.set(ordsCollected + i, bucketAndOrd.ord);
+ buckets[i] = bucket;
+ otherDocCount.increment(ordIdx, -bucket.getDocCount());
+ }
+ topBucketsPreOrd.set(ordIdx, buckets);
+ ordsCollected += orderedSize;
}
- updater.updateBucket(spare, ordsEnum.value(), ordsEnum.ord(), docCount);
- spare = ordered.insertWithOverflow(spare);
- }
- // Get the top buckets
- topBucketsPreOrd.set(ordIdx, collectionStrategy.buildBuckets((int) ordered.size()));
- for (int i = (int) ordered.size() - 1; i >= 0; --i) {
- checkRealMemoryCBForInternalBucket();
- B bucket = collectionStrategy.convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd);
- topBucketsPreOrd.get(ordIdx)[i] = bucket;
- otherDocCount.increment(ordIdx, -bucket.getDocCount());
}
+ assert ordsCollected == ordsArray.size();
+ collectionStrategy.buildSubAggs(topBucketsPreOrd, ordsArray);
}
}
- collectionStrategy.buildSubAggs(topBucketsPreOrd);
return GlobalOrdinalsStringTermsAggregator.this.buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> collectionStrategy.buildResult(
@@ -791,7 +821,7 @@ abstract class ResultStrategy<
* Build a {@link PriorityQueue} to sort the buckets. After we've
* collected all of the buckets we'll collect all entries in the queue.
*/
- abstract ObjectArrayPriorityQueue<TB> buildPriorityQueue(int size);
+ abstract ObjectArrayPriorityQueue<BucketAndOrd<TB>> buildPriorityQueue(int size);
/**
* Build an array to hold the "top" buckets for each ordinal.
@@ -813,7 +843,7 @@ abstract class ResultStrategy<
* Build the sub-aggregations into the buckets. This will usually
* delegate to {@link #buildSubAggsForAllBuckets}.
*/
- abstract void buildSubAggs(ObjectArray topBucketsPreOrd) throws IOException;
+ abstract void buildSubAggs(ObjectArray topBucketsPreOrd, LongArray ordsArray) throws IOException;
/**
* Turn the buckets into an aggregation result.
@@ -834,7 +864,7 @@ abstract class ResultStrategy<
}
interface BucketUpdater {
- void updateBucket(TB spare, long globalOrd, long bucketOrd, long docCount) throws IOException;
+ void updateBucket(TB spare, long globalOrd, long docCount) throws IOException;
}
/**
@@ -868,29 +898,30 @@ OrdBucket buildEmptyTemporaryBucket() {
@Override
BucketUpdater bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd) {
- return (spare, globalOrd, bucketOrd, docCount) -> {
+ return (spare, globalOrd, docCount) -> {
spare.globalOrd = globalOrd;
- spare.bucketOrd = bucketOrd;
spare.docCount = docCount;
};
}
@Override
- ObjectArrayPriorityQueue buildPriorityQueue(int size) {
- return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator);
+ ObjectArrayPriorityQueue> buildPriorityQueue(int size) {
+ return new BucketPriorityQueue<>(
+ size,
+ bigArrays(),
+ order.partiallyBuiltBucketComparator(GlobalOrdinalsStringTermsAggregator.this)
+ );
}
@Override
StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp, GlobalOrdLookupFunction lookupGlobalOrd) throws IOException {
BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd));
- StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format);
- result.bucketOrd = temp.bucketOrd;
- return result;
+ return new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format);
}
@Override
- void buildSubAggs(ObjectArray topBucketsPreOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+ void buildSubAggs(ObjectArray topBucketsPreOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPreOrd, ordsArray, (b, aggs) -> b.aggregations = aggs);
}
@Override
@@ -1005,8 +1036,7 @@ private long subsetSize(long owningBucketOrd) {
@Override
BucketUpdater bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd) {
long subsetSize = subsetSize(owningBucketOrd);
- return (spare, globalOrd, bucketOrd, docCount) -> {
- spare.bucketOrd = bucketOrd;
+ return (spare, globalOrd, docCount) -> {
oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes);
spare.subsetDf = docCount;
spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);
@@ -1020,7 +1050,7 @@ BucketUpdater bucketUpdater(long owningBucketOrd,
}
@Override
- ObjectArrayPriorityQueue buildPriorityQueue(int size) {
+ ObjectArrayPriorityQueue> buildPriorityQueue(int size) {
return new BucketSignificancePriorityQueue<>(size, bigArrays());
}
@@ -1033,8 +1063,8 @@ SignificantStringTerms.Bucket convertTempBucketToRealBucket(
}
@Override
- void buildSubAggs(ObjectArray topBucketsPreOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+ void buildSubAggs(ObjectArray topBucketsPreOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPreOrd, ordsArray, (b, aggs) -> b.aggregations = aggs);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
index 78ae2481f5d9..5108793b8a80 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
@@ -10,12 +10,12 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
import org.elasticsearch.common.util.ObjectObjectPagedHashMap;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationErrors;
import org.elasticsearch.search.aggregations.AggregationReduceContext;
-import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorReducer;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -58,12 +58,6 @@ public interface Reader> {
long subsetDf;
long supersetDf;
- /**
- * Ordinal of the bucket while it is being built. Not used after it is
- * returned from {@link Aggregator#buildAggregations(org.elasticsearch.common.util.LongArray)} and not
- * serialized.
- */
- transient long bucketOrd;
double score;
protected InternalAggregations aggregations;
final transient DocValueFormat format;
@@ -235,7 +229,12 @@ canLeadReduction here is essentially checking if this shard returned data. Unma
public InternalAggregation get() {
final SignificanceHeuristic heuristic = getSignificanceHeuristic().rewrite(reduceContext);
final int size = (int) (reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size()));
- try (BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size, reduceContext.bigArrays())) {
+ try (ObjectArrayPriorityQueue<B> ordered = new ObjectArrayPriorityQueue<B>(size, reduceContext.bigArrays()) {
+ @Override
+ protected boolean lessThan(B a, B b) {
+ return a.getSignificanceScore() < b.getSignificanceScore();
+ }
+ }) {
buckets.forEach(entry -> {
final B b = createBucket(
entry.value.subsetDf[0],
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
index 739f0b923eaa..de35046691b3 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
@@ -38,8 +38,6 @@ public interface Reader> {
B read(StreamInput in, DocValueFormat format, boolean showDocCountError) throws IOException;
}
- long bucketOrd;
-
protected long docCount;
private long docCountError;
protected InternalAggregations aggregations;
@@ -88,14 +86,6 @@ public void setDocCount(long docCount) {
this.docCount = docCount;
}
- public long getBucketOrd() {
- return bucketOrd;
- }
-
- public void setBucketOrd(long bucketOrd) {
- this.bucketOrd = bucketOrd;
- }
-
@Override
public long getDocCountError() {
return docCountError;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java
index b96c495d3748..026912a583ef 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java
@@ -17,6 +17,7 @@
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
@@ -43,6 +44,7 @@
import java.io.IOException;
import java.util.Arrays;
+import java.util.Comparator;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Function;
@@ -287,40 +289,55 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true);
ObjectArray topBucketsPerOrd = buildTopBucketsPerOrd(Math.toIntExact(owningBucketOrds.size()))
) {
- for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
- long owningOrd = owningBucketOrds.get(ordIdx);
- collectZeroDocEntriesIfNeeded(owningOrd, excludeDeletedDocs);
- int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());
-
- try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) {
- B spare = null;
- BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningOrd);
- BucketUpdater bucketUpdater = bucketUpdater(owningOrd);
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCounts.increment(ordIdx, docCount);
- if (docCount < bucketCountThresholds.getShardMinDocCount()) {
- continue;
- }
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = buildEmptyBucket();
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs);
+ final int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize());
+ ordsToCollect += size;
+ bucketsToCollect.set(ordIdx, size);
+ }
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
+ long owningOrd = owningBucketOrds.get(ordIdx);
+ try (ObjectArrayPriorityQueue> ordered = buildPriorityQueue(bucketsToCollect.get(ordIdx))) {
+ BucketAndOrd spare = null;
+ BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningOrd);
+ BucketUpdater bucketUpdater = bucketUpdater(owningOrd);
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCounts.increment(ordIdx, docCount);
+ if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+ continue;
+ }
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(buildEmptyBucket());
+ }
+ bucketUpdater.updateBucket(spare.bucket, ordsEnum, docCount);
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
+
+ final int orderedSize = (int) ordered.size();
+ final B[] buckets = buildBuckets(orderedSize);
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ BucketAndOrd bucketAndOrd = ordered.pop();
+ finalizeBucket(bucketAndOrd.bucket);
+ buckets[i] = bucketAndOrd.bucket;
+ ordsArray.set(ordsCollected + i, bucketAndOrd.ord);
+ otherDocCounts.increment(ordIdx, -bucketAndOrd.bucket.getDocCount());
+ }
+ topBucketsPerOrd.set(ordIdx, buckets);
+ ordsCollected += orderedSize;
}
- bucketUpdater.updateBucket(spare, ordsEnum, docCount);
- spare = ordered.insertWithOverflow(spare);
- }
-
- topBucketsPerOrd.set(ordIdx, buildBuckets((int) ordered.size()));
- for (int i = (int) ordered.size() - 1; i >= 0; --i) {
- topBucketsPerOrd.get(ordIdx)[i] = ordered.pop();
- otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[i].getDocCount());
- finalizeBucket(topBucketsPerOrd.get(ordIdx)[i]);
}
+ assert ordsCollected == ordsArray.size();
+ buildSubAggs(topBucketsPerOrd, ordsArray);
}
}
-
- buildSubAggs(topBucketsPerOrd);
-
return MapStringTermsAggregator.this.buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx))
@@ -355,7 +372,7 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
* Build a {@link PriorityQueue} to sort the buckets. After we've
* collected all of the buckets we'll collect all entries in the queue.
*/
- abstract ObjectArrayPriorityQueue buildPriorityQueue(int size);
+ abstract ObjectArrayPriorityQueue> buildPriorityQueue(int size);
/**
* Update fields in {@code spare} to reflect information collected for
@@ -382,9 +399,9 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
/**
* Build the sub-aggregations into the buckets. This will usually
- * delegate to {@link #buildSubAggsForAllBuckets}.
+ * delegate to {@link #buildSubAggsForAllBuckets(ObjectArray, LongArray, BiConsumer)}.
*/
- abstract void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException;
+ abstract void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException;
/**
* Turn the buckets into an aggregation result.
@@ -407,9 +424,11 @@ interface BucketUpdater
*/
class StandardTermsResults extends ResultStrategy {
private final ValuesSource valuesSource;
+ private final Comparator> comparator;
- StandardTermsResults(ValuesSource valuesSource) {
+ StandardTermsResults(ValuesSource valuesSource, Aggregator aggregator) {
this.valuesSource = valuesSource;
+ this.comparator = order.partiallyBuiltBucketComparator(aggregator);
}
@Override
@@ -498,8 +517,8 @@ StringTerms.Bucket buildEmptyBucket() {
}
@Override
- ObjectArrayPriorityQueue buildPriorityQueue(int size) {
- return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator);
+ ObjectArrayPriorityQueue> buildPriorityQueue(int size) {
+ return new BucketPriorityQueue<>(size, bigArrays(), comparator);
}
@Override
@@ -507,7 +526,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
return (spare, ordsEnum, docCount) -> {
ordsEnum.readValue(spare.termBytes);
spare.docCount = docCount;
- spare.bucketOrd = ordsEnum.ord();
};
}
@@ -532,8 +550,8 @@ void finalizeBucket(StringTerms.Bucket bucket) {
}
@Override
- void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a);
+ void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordArray, (b, a) -> b.aggregations = a);
}
@Override
@@ -625,7 +643,7 @@ SignificantStringTerms.Bucket buildEmptyBucket() {
}
@Override
- ObjectArrayPriorityQueue buildPriorityQueue(int size) {
+ ObjectArrayPriorityQueue> buildPriorityQueue(int size) {
return new BucketSignificancePriorityQueue<>(size, bigArrays());
}
@@ -634,7 +652,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd)
long subsetSize = subsetSizes.get(owningBucketOrd);
return (spare, ordsEnum, docCount) -> {
ordsEnum.readValue(spare.termBytes);
- spare.bucketOrd = ordsEnum.ord();
spare.subsetDf = docCount;
spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);
/*
@@ -667,8 +684,8 @@ void finalizeBucket(SignificantStringTerms.Bucket bucket) {
}
@Override
- void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a);
+ void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, a) -> b.aggregations = a);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java
index 5d4c15d8a3b8..a54053f712f8 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java
@@ -14,6 +14,7 @@
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
@@ -40,6 +41,7 @@
import java.io.IOException;
import java.util.Arrays;
+import java.util.Comparator;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Function;
@@ -167,42 +169,56 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true);
ObjectArray topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.size())
) {
- for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
- final long owningBucketOrd = owningBucketOrds.get(ordIdx);
- collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs);
- long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd);
-
- int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize());
- try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) {
- B spare = null;
- BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
- BucketUpdater bucketUpdater = bucketUpdater(owningBucketOrd);
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCounts.increment(ordIdx, docCount);
- if (docCount < bucketCountThresholds.getShardMinDocCount()) {
- continue;
- }
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = buildEmptyBucket();
- }
- bucketUpdater.updateBucket(spare, ordsEnum, docCount);
- spare = ordered.insertWithOverflow(spare);
- }
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs);
+ int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize());
+ bucketsToCollect.set(ordIdx, size);
+ ordsToCollect += size;
+ }
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ try (ObjectArrayPriorityQueue> ordered = buildPriorityQueue(bucketsToCollect.get(ordIdx))) {
+ BucketAndOrd spare = null;
+ BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
+ BucketUpdater bucketUpdater = bucketUpdater(owningBucketOrd);
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCounts.increment(ordIdx, docCount);
+ if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+ continue;
+ }
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(buildEmptyBucket());
+ }
+ bucketUpdater.updateBucket(spare.bucket, ordsEnum, docCount);
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
+
+ // Get the top buckets
+ final int orderedSize = (int) ordered.size();
+ final B[] bucketsForOrd = buildBuckets(orderedSize);
+ for (int b = orderedSize - 1; b >= 0; --b) {
+ BucketAndOrd bucketAndOrd = ordered.pop();
+ bucketsForOrd[b] = bucketAndOrd.bucket;
+ ordsArray.set(ordsCollected + b, bucketAndOrd.ord);
+ otherDocCounts.increment(ordIdx, -bucketAndOrd.bucket.getDocCount());
+ }
+ topBucketsPerOrd.set(ordIdx, bucketsForOrd);
+ ordsCollected += orderedSize;
- // Get the top buckets
- B[] bucketsForOrd = buildBuckets((int) ordered.size());
- topBucketsPerOrd.set(ordIdx, bucketsForOrd);
- for (int b = (int) ordered.size() - 1; b >= 0; --b) {
- topBucketsPerOrd.get(ordIdx)[b] = ordered.pop();
- otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[b].getDocCount());
+ }
}
+ assert ordsCollected == ordsArray.size();
+ buildSubAggs(topBucketsPerOrd, ordsArray);
}
}
-
- buildSubAggs(topBucketsPerOrd);
-
return NumericTermsAggregator.this.buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx))
@@ -254,13 +270,13 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro
* Build a {@link ObjectArrayPriorityQueue} to sort the buckets. After we've
* collected all of the buckets we'll collect all entries in the queue.
*/
- abstract ObjectArrayPriorityQueue buildPriorityQueue(int size);
+ abstract ObjectArrayPriorityQueue> buildPriorityQueue(int size);
/**
* Build the sub-aggregations into the buckets. This will usually
- * delegate to {@link #buildSubAggsForAllBuckets}.
+ * delegate to {@link #buildSubAggsForAllBuckets(ObjectArray, LongArray, BiConsumer)}.
*/
- abstract void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException;
+ abstract void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException;
/**
* Collect extra entries for "zero" hit documents if they were requested
@@ -287,9 +303,11 @@ interface BucketUpdater
abstract class StandardTermsResultStrategy, B extends InternalTerms.Bucket> extends
ResultStrategy {
protected final boolean showTermDocCountError;
+ private final Comparator> comparator;
- StandardTermsResultStrategy(boolean showTermDocCountError) {
+ StandardTermsResultStrategy(boolean showTermDocCountError, Aggregator aggregator) {
this.showTermDocCountError = showTermDocCountError;
+ this.comparator = order.partiallyBuiltBucketComparator(aggregator);
}
@Override
@@ -298,13 +316,13 @@ final LeafBucketCollector wrapCollector(LeafBucketCollector primary) {
}
@Override
- final ObjectArrayPriorityQueue buildPriorityQueue(int size) {
- return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator);
+ final ObjectArrayPriorityQueue> buildPriorityQueue(int size) {
+ return new BucketPriorityQueue<>(size, bigArrays(), comparator);
}
@Override
- final void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+ final void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, aggs) -> b.aggregations = aggs);
}
@Override
@@ -340,8 +358,8 @@ public final void close() {}
}
class LongTermsResults extends StandardTermsResultStrategy {
- LongTermsResults(boolean showTermDocCountError) {
- super(showTermDocCountError);
+ LongTermsResults(boolean showTermDocCountError, Aggregator aggregator) {
+ super(showTermDocCountError, aggregator);
}
@Override
@@ -374,7 +392,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
return (LongTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) -> {
spare.term = ordsEnum.value();
spare.docCount = docCount;
- spare.bucketOrd = ordsEnum.ord();
};
}
@@ -424,8 +441,8 @@ LongTerms buildEmptyResult() {
class DoubleTermsResults extends StandardTermsResultStrategy {
- DoubleTermsResults(boolean showTermDocCountError) {
- super(showTermDocCountError);
+ DoubleTermsResults(boolean showTermDocCountError, Aggregator aggregator) {
+ super(showTermDocCountError, aggregator);
}
@Override
@@ -458,7 +475,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
return (DoubleTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) -> {
spare.term = NumericUtils.sortableLongToDouble(ordsEnum.value());
spare.docCount = docCount;
- spare.bucketOrd = ordsEnum.ord();
};
}
@@ -575,7 +591,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
spare.term = ordsEnum.value();
spare.subsetDf = docCount;
spare.supersetDf = backgroundFrequencies.freq(spare.term);
- spare.bucketOrd = ordsEnum.ord();
// During shard-local down-selection we use subset/superset stats that are for this shard only
// Back at the central reducer these properties will be updated with global stats
spare.updateScore(significanceHeuristic, subsetSize, supersetSize);
@@ -583,13 +598,13 @@ BucketUpdater bucketUpdater(long owningBucketOrd) {
}
@Override
- ObjectArrayPriorityQueue buildPriorityQueue(int size) {
+ ObjectArrayPriorityQueue> buildPriorityQueue(int size) {
return new BucketSignificancePriorityQueue<>(size, bigArrays());
}
@Override
- void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException {
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+ void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException {
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, aggs) -> b.aggregations = aggs);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
index 4922be7cec1b..c07c0726a4ae 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
@@ -27,7 +27,6 @@
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
-import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
@@ -190,7 +189,6 @@ public boolean equals(Object obj) {
protected final DocValueFormat format;
protected final BucketCountThresholds bucketCountThresholds;
protected final BucketOrder order;
- protected final Comparator> partiallyBuiltBucketComparator;
protected final Set aggsUsedForSorting;
protected final SubAggCollectionMode collectMode;
@@ -209,7 +207,9 @@ public TermsAggregator(
super(name, factories, context, parent, metadata);
this.bucketCountThresholds = bucketCountThresholds;
this.order = order;
- partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this);
+ if (order != null) {
+ order.validate(this);
+ }
this.format = format;
if ((subAggsNeedScore() && descendsFromNestedAggregator(parent)) || context.isInSortOrderExecutionRequired()) {
/**
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
index 2c7b768fcdbb..da5ae37b0822 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
@@ -195,12 +195,12 @@ private static TermsAggregatorSupplier numericSupplier() {
if (includeExclude != null) {
longFilter = includeExclude.convertToDoubleFilter();
}
- resultStrategy = agg -> agg.new DoubleTermsResults(showTermDocCountError);
+ resultStrategy = agg -> agg.new DoubleTermsResults(showTermDocCountError, agg);
} else {
if (includeExclude != null) {
longFilter = includeExclude.convertToLongFilter(valuesSourceConfig.format());
}
- resultStrategy = agg -> agg.new LongTermsResults(showTermDocCountError);
+ resultStrategy = agg -> agg.new LongTermsResults(showTermDocCountError, agg);
}
return new NumericTermsAggregator(
name,
@@ -403,7 +403,7 @@ Aggregator create(
name,
factories,
new MapStringTermsAggregator.ValuesSourceCollectorSource(valuesSourceConfig),
- a -> a.new StandardTermsResults(valuesSourceConfig.getValuesSource()),
+ a -> a.new StandardTermsResults(valuesSourceConfig.getValuesSource(), a),
order,
valuesSourceConfig.format(),
bucketCountThresholds,
diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index 098a2b2f45d2..3554a6dc08b9 100644
--- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -19,7 +19,6 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.Booleans;
@@ -92,7 +91,6 @@
* @see SearchRequest#source(SearchSourceBuilder)
*/
public final class SearchSourceBuilder implements Writeable, ToXContentObject, Rewriteable {
- private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SearchSourceBuilder.class);
public static final ParseField FROM_FIELD = new ParseField("from");
public static final ParseField SIZE_FIELD = new ParseField("size");
diff --git a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
index f7f8cee30ee1..9eb0170af5ef 100644
--- a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
+++ b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
@@ -102,6 +102,14 @@ private SearchLookup(SearchLookup searchLookup, Set fieldChain) {
this.fieldLookupProvider = searchLookup.fieldLookupProvider;
}
+ private SearchLookup(SearchLookup searchLookup, SourceProvider sourceProvider, Set fieldChain) {
+ this.fieldChain = Collections.unmodifiableSet(fieldChain);
+ this.sourceProvider = sourceProvider;
+ this.fieldTypeLookup = searchLookup.fieldTypeLookup;
+ this.fieldDataLookup = searchLookup.fieldDataLookup;
+ this.fieldLookupProvider = searchLookup.fieldLookupProvider;
+ }
+
/**
* Creates a copy of the current {@link SearchLookup} that looks fields up in the same way, but also tracks field references
* in order to detect cycles and prevent resolving fields that depend on more than {@link #MAX_FIELD_CHAIN_DEPTH} other fields.
@@ -144,4 +152,8 @@ public IndexFieldData> getForField(MappedFieldType fieldType, MappedFieldType.
public Source getSource(LeafReaderContext ctx, int doc) throws IOException {
return sourceProvider.getSource(ctx, doc);
}
+
+ public SearchLookup swapSourceProvider(SourceProvider sourceProvider) {
+ return new SearchLookup(this, sourceProvider, fieldChain);
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java b/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java
index 9ab14aa9362b..d4127836a4e4 100644
--- a/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java
+++ b/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java
@@ -44,7 +44,7 @@ public String getWriteableName() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.RANK_DOCS_RETRIEVER;
+ return TransportVersions.V_8_16_0;
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java
index db839de9f573..2ab6395db73b 100644
--- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java
@@ -20,6 +20,7 @@
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.TransportMultiSearchAction;
+import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.rest.RestStatus;
@@ -46,6 +47,8 @@
*/
public abstract class CompoundRetrieverBuilder> extends RetrieverBuilder {
+ public static final NodeFeature INNER_RETRIEVERS_FILTER_SUPPORT = new NodeFeature("inner_retrievers_filter_support");
+
public record RetrieverSource(RetrieverBuilder retriever, SearchSourceBuilder source) {}
protected final int rankWindowSize;
@@ -64,9 +67,9 @@ public T addChild(RetrieverBuilder retrieverBuilder) {
/**
* Returns a clone of the original retriever, replacing the sub-retrievers with
- * the provided {@code newChildRetrievers}.
+ * the provided {@code newChildRetrievers} and the filters with the {@code newPreFilterQueryBuilders}.
*/
- protected abstract T clone(List newChildRetrievers);
+ protected abstract T clone(List newChildRetrievers, List newPreFilterQueryBuilders);
/**
* Combines the provided {@code rankResults} to return the final top documents.
@@ -85,13 +88,25 @@ public final RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOExceptio
}
// Rewrite prefilters
- boolean hasChanged = false;
+ // We eagerly rewrite prefilters, because some of the innerRetrievers
+ // could be compound too, so we want to propagate all the necessary filter information to them
+ // and have it available as part of their own rewrite step
var newPreFilters = rewritePreFilters(ctx);
- hasChanged |= newPreFilters != preFilterQueryBuilders;
+ if (newPreFilters != preFilterQueryBuilders) {
+ return clone(innerRetrievers, newPreFilters);
+ }
+ boolean hasChanged = false;
// Rewrite retriever sources
List newRetrievers = new ArrayList<>();
for (var entry : innerRetrievers) {
+ // we propagate the filters only for compound retrievers as they won't be attached through
+ // the createSearchSourceBuilder.
+ // We could remove this check, but we would end up adding the same filters
+ // multiple times in case an inner retriever rewrites itself, when we re-enter to rewrite
+ if (entry.retriever.isCompound() && false == preFilterQueryBuilders.isEmpty()) {
+ entry.retriever.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders);
+ }
RetrieverBuilder newRetriever = entry.retriever.rewrite(ctx);
if (newRetriever != entry.retriever) {
newRetrievers.add(new RetrieverSource(newRetriever, null));
@@ -106,7 +121,7 @@ public final RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOExceptio
}
}
if (hasChanged) {
- return clone(newRetrievers);
+ return clone(newRetrievers, newPreFilters);
}
// execute searches
@@ -166,12 +181,7 @@ public void onFailure(Exception e) {
});
});
- return new RankDocsRetrieverBuilder(
- rankWindowSize,
- newRetrievers.stream().map(s -> s.retriever).toList(),
- results::get,
- newPreFilters
- );
+ return new RankDocsRetrieverBuilder(rankWindowSize, newRetrievers.stream().map(s -> s.retriever).toList(), results::get);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java
index 8be9a78dae15..f1464c41ca3b 100644
--- a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java
@@ -184,8 +184,7 @@ public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException {
ll.onResponse(null);
}));
});
- var rewritten = new KnnRetrieverBuilder(this, () -> toSet.get(), null);
- return rewritten;
+ return new KnnRetrieverBuilder(this, () -> toSet.get(), null);
}
return super.rewrite(ctx);
}
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java
index 02f890f51d01..4d3f3fefd446 100644
--- a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java
@@ -33,19 +33,13 @@ public class RankDocsRetrieverBuilder extends RetrieverBuilder {
final List sources;
final Supplier rankDocs;
- public RankDocsRetrieverBuilder(
- int rankWindowSize,
- List sources,
- Supplier rankDocs,
- List preFilterQueryBuilders
- ) {
+ public RankDocsRetrieverBuilder(int rankWindowSize, List sources, Supplier rankDocs) {
this.rankWindowSize = rankWindowSize;
this.rankDocs = rankDocs;
if (sources == null || sources.isEmpty()) {
throw new IllegalArgumentException("sources must not be null or empty");
}
this.sources = sources;
- this.preFilterQueryBuilders = preFilterQueryBuilders;
}
@Override
@@ -73,10 +67,6 @@ private boolean sourceShouldRewrite(QueryRewriteContext ctx) throws IOException
@Override
public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException {
assert false == sourceShouldRewrite(ctx) : "retriever sources should be rewritten first";
- var rewrittenFilters = rewritePreFilters(ctx);
- if (rewrittenFilters != preFilterQueryBuilders) {
- return new RankDocsRetrieverBuilder(rankWindowSize, sources, rankDocs, rewrittenFilters);
- }
return this;
}
@@ -94,7 +84,7 @@ public QueryBuilder topDocsQuery() {
boolQuery.should(query);
}
}
- // ignore prefilters of this level, they are already propagated to children
+ // ignore prefilters of this level, they were already propagated to children
return boolQuery;
}
@@ -133,7 +123,7 @@ public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder
} else {
rankQuery = new RankDocsQueryBuilder(rankDocResults, null, false);
}
- // ignore prefilters of this level, they are already propagated to children
+ // ignore prefilters of this level, they were already propagated to children
searchSourceBuilder.query(rankQuery);
if (sourceHasMinScore()) {
searchSourceBuilder.minScore(this.minScore() == null ? Float.MIN_VALUE : this.minScore());
diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
index 6640f0f85840..2aaade35fb8f 100644
--- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
@@ -28,7 +28,6 @@
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.fielddata.FieldData;
@@ -67,7 +66,6 @@
* A geo distance based sorting on a geo point like field.
*/
public class GeoDistanceSortBuilder extends SortBuilder {
- private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(GeoDistanceSortBuilder.class);
public static final String NAME = "_geo_distance";
public static final String ALTERNATIVE_NAME = "_geoDistance";
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java
index c8670a8dfeec..77d708432cf2 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java
@@ -55,8 +55,7 @@ public ExactKnnQueryBuilder(StreamInput in) throws IOException {
this.query = VectorData.fromFloats(in.readFloatArray());
}
this.field = in.readString();
- if (in.getTransportVersion().onOrAfter(TransportVersions.FIX_VECTOR_SIMILARITY_INNER_HITS)
- || in.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_0)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
this.vectorSimilarity = in.readOptionalFloat();
} else {
this.vectorSimilarity = null;
@@ -88,8 +87,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
out.writeFloatArray(query.asFloatVector());
}
out.writeString(field);
- if (out.getTransportVersion().onOrAfter(TransportVersions.FIX_VECTOR_SIMILARITY_INNER_HITS)
- || out.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_0)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
out.writeOptionalFloat(vectorSimilarity);
}
}
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java
index f52addefc8b1..b5ba97906f0e 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java
@@ -71,8 +71,7 @@ public KnnScoreDocQueryBuilder(StreamInput in) throws IOException {
this.fieldName = null;
this.queryVector = null;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.FIX_VECTOR_SIMILARITY_INNER_HITS)
- || in.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_0)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
this.vectorSimilarity = in.readOptionalFloat();
} else {
this.vectorSimilarity = null;
@@ -116,8 +115,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
out.writeBoolean(false);
}
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.FIX_VECTOR_SIMILARITY_INNER_HITS)
- || out.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_0)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
out.writeOptionalFloat(vectorSimilarity);
}
}
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
index deb7e6bd035b..5dd2cbf32dd1 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
@@ -481,10 +481,9 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException {
}
parentBitSet = context.bitsetFilter(parentFilter);
if (filterQuery != null) {
- NestedHelper nestedHelper = new NestedHelper(context.nestedLookup(), context::isFieldMapped);
// We treat the provided filter as a filter over PARENT documents, so if it might match nested documents
// we need to adjust it.
- if (nestedHelper.mightMatchNestedDocs(filterQuery)) {
+ if (NestedHelper.mightMatchNestedDocs(filterQuery, context)) {
// Ensure that the query only returns parent documents matching `filterQuery`
filterQuery = Queries.filtered(filterQuery, parentFilter);
}
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java b/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java
index f34b87669747..231894875b7f 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java
@@ -101,7 +101,7 @@ public String getWriteableName() {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.REGISTER_SLM_STATS;
+ return TransportVersions.V_8_16_0;
}
@Override
@@ -171,7 +171,7 @@ public void writeTo(StreamOutput out) throws IOException {
@Override
public TransportVersion getMinimalSupportedVersion() {
- return TransportVersions.REGISTER_SLM_STATS;
+ return TransportVersions.V_8_16_0;
}
}
diff --git a/server/src/test/java/org/elasticsearch/TransportVersionTests.java b/server/src/test/java/org/elasticsearch/TransportVersionTests.java
index 6c2cc5c1f4cc..08b12cec2e17 100644
--- a/server/src/test/java/org/elasticsearch/TransportVersionTests.java
+++ b/server/src/test/java/org/elasticsearch/TransportVersionTests.java
@@ -211,7 +211,7 @@ public void testDenseTransportVersions() {
Set missingVersions = new TreeSet<>();
TransportVersion previous = null;
for (var tv : TransportVersions.getAllVersions()) {
- if (tv.before(TransportVersions.V_8_15_2)) {
+ if (tv.before(TransportVersions.V_8_16_0)) {
continue;
}
if (previous == null) {
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParametersTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParametersTests.java
index f37b1d1b4171..cfdbfdfbfcf8 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParametersTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParametersTests.java
@@ -23,7 +23,7 @@
public class NodesStatsRequestParametersTests extends ESTestCase {
public void testReadWriteMetricSet() {
- for (var version : List.of(TransportVersions.V_8_15_0, TransportVersions.NODES_STATS_ENUM_SET)) {
+ for (var version : List.of(TransportVersions.V_8_15_0, TransportVersions.V_8_16_0)) {
var randSet = randomSubsetOf(Metric.ALL);
var metricsOut = randSet.isEmpty() ? EnumSet.noneOf(Metric.class) : EnumSet.copyOf(randSet);
try {
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java
index 89ccd4ab63d7..46b757407e6a 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java
@@ -199,7 +199,7 @@ public void testSerializationBWC() throws IOException {
randomQueryUsage(QUERY_TYPES.size()),
version.onOrAfter(TransportVersions.V_8_12_0) ? randomRescorerUsage(RESCORER_TYPES.size()) : Map.of(),
randomSectionsUsage(SECTIONS.size()),
- version.onOrAfter(TransportVersions.RETRIEVERS_TELEMETRY_ADDED) ? randomRetrieversUsage(RETRIEVERS.size()) : Map.of(),
+ version.onOrAfter(TransportVersions.V_8_16_0) ? randomRetrieversUsage(RETRIEVERS.size()) : Map.of(),
randomLongBetween(0, Long.MAX_VALUE)
);
assertSerialization(testInstance, version);
diff --git a/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java b/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java
new file mode 100644
index 000000000000..798b576500d7
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java
@@ -0,0 +1,257 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.bootstrap;
+
+import org.elasticsearch.plugins.PluginBundle;
+import org.elasticsearch.plugins.PluginDescriptor;
+import org.elasticsearch.plugins.PluginsLoader;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.compiler.InMemoryJavaCompiler;
+import org.elasticsearch.test.jar.JarUtils;
+
+import java.io.IOException;
+import java.lang.module.Configuration;
+import java.lang.module.ModuleFinder;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Stream;
+
+import static java.util.Map.entry;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+@ESTestCase.WithoutSecurityManager
+public class PluginsResolverTests extends ESTestCase {
+
+ private record TestPluginLayer(PluginBundle pluginBundle, ClassLoader pluginClassLoader, ModuleLayer pluginModuleLayer)
+ implements
+ PluginsLoader.PluginLayer {}
+
+ public void testResolveModularPlugin() throws IOException, ClassNotFoundException {
+ String moduleName = "modular.plugin";
+ String pluginName = "modular-plugin";
+
+ final Path home = createTempDir();
+
+ Path jar = createModularPluginJar(home, pluginName, moduleName, "p", "A");
+
+ var layer = createModuleLayer(moduleName, jar);
+ var loader = layer.findLoader(moduleName);
+
+ PluginBundle bundle = createMockBundle(pluginName, moduleName, "p.A");
+ PluginsLoader mockPluginsLoader = mock(PluginsLoader.class);
+
+ when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, layer)));
+ PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader);
+
+ var testClass = loader.loadClass("p.A");
+ var resolvedPluginName = pluginsResolver.resolveClassToPluginName(testClass);
+ var unresolvedPluginName1 = pluginsResolver.resolveClassToPluginName(PluginsResolver.class);
+ var unresolvedPluginName2 = pluginsResolver.resolveClassToPluginName(String.class);
+
+ assertEquals(pluginName, resolvedPluginName);
+ assertNull(unresolvedPluginName1);
+ assertNull(unresolvedPluginName2);
+ }
+
+ public void testResolveMultipleModularPlugins() throws IOException, ClassNotFoundException {
+ final Path home = createTempDir();
+
+ Path jar1 = createModularPluginJar(home, "plugin1", "module.one", "p", "A");
+ Path jar2 = createModularPluginJar(home, "plugin2", "module.two", "q", "B");
+
+ var layer1 = createModuleLayer("module.one", jar1);
+ var loader1 = layer1.findLoader("module.one");
+ var layer2 = createModuleLayer("module.two", jar2);
+ var loader2 = layer2.findLoader("module.two");
+
+ PluginBundle bundle1 = createMockBundle("plugin1", "module.one", "p.A");
+ PluginBundle bundle2 = createMockBundle("plugin2", "module.two", "q.B");
+ PluginsLoader mockPluginsLoader = mock(PluginsLoader.class);
+
+ when(mockPluginsLoader.pluginLayers()).thenReturn(
+ Stream.of(new TestPluginLayer(bundle1, loader1, layer1), new TestPluginLayer(bundle2, loader2, layer2))
+ );
+ PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader);
+
+ var testClass1 = loader1.loadClass("p.A");
+ var testClass2 = loader2.loadClass("q.B");
+ var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1);
+ var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2);
+
+ assertEquals("plugin1", resolvedPluginName1);
+ assertEquals("plugin2", resolvedPluginName2);
+ }
+
+ public void testResolveReferencedModulesInModularPlugins() throws IOException, ClassNotFoundException {
+ final Path home = createTempDir();
+
+ Path dependencyJar = createModularPluginJar(home, "plugin1", "module.one", "p", "A");
+ Path pluginJar = home.resolve("plugin2.jar");
+
+ Map sources = Map.ofEntries(
+ entry("module-info", "module module.two { exports q; requires module.one; }"),
+ entry("q.B", "package q; public class B { public p.A a = null; }")
+ );
+
+ var classToBytes = InMemoryJavaCompiler.compile(sources, "--add-modules", "module.one", "-p", home.toString());
+ JarUtils.createJarWithEntries(
+ pluginJar,
+ Map.ofEntries(entry("module-info.class", classToBytes.get("module-info")), entry("q/B.class", classToBytes.get("q.B")))
+ );
+
+ var layer = createModuleLayer("module.two", pluginJar, dependencyJar);
+ var loader = layer.findLoader("module.two");
+
+ PluginBundle bundle = createMockBundle("plugin2", "module.two", "q.B");
+ PluginsLoader mockPluginsLoader = mock(PluginsLoader.class);
+
+ when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, layer)));
+ PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader);
+
+ var testClass1 = loader.loadClass("p.A");
+ var testClass2 = loader.loadClass("q.B");
+ var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1);
+ var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2);
+
+ assertEquals("plugin2", resolvedPluginName1);
+ assertEquals("plugin2", resolvedPluginName2);
+ }
+
+ public void testResolveMultipleNonModularPlugins() throws IOException, ClassNotFoundException {
+ final Path home = createTempDir();
+
+ Path jar1 = createNonModularPluginJar(home, "plugin1", "p", "A");
+ Path jar2 = createNonModularPluginJar(home, "plugin2", "q", "B");
+
+ try (var loader1 = createClassLoader(jar1); var loader2 = createClassLoader(jar2)) {
+
+ PluginBundle bundle1 = createMockBundle("plugin1", null, "p.A");
+ PluginBundle bundle2 = createMockBundle("plugin2", null, "q.B");
+ PluginsLoader mockPluginsLoader = mock(PluginsLoader.class);
+
+ when(mockPluginsLoader.pluginLayers()).thenReturn(
+ Stream.of(
+ new TestPluginLayer(bundle1, loader1, ModuleLayer.boot()),
+ new TestPluginLayer(bundle2, loader2, ModuleLayer.boot())
+ )
+ );
+ PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader);
+
+ var testClass1 = loader1.loadClass("p.A");
+ var testClass2 = loader2.loadClass("q.B");
+ var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1);
+ var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2);
+
+ assertEquals("plugin1", resolvedPluginName1);
+ assertEquals("plugin2", resolvedPluginName2);
+ }
+ }
+
+ public void testResolveNonModularPlugin() throws IOException, ClassNotFoundException {
+ String pluginName = "non-modular-plugin";
+
+ final Path home = createTempDir();
+
+ Path jar = createNonModularPluginJar(home, pluginName, "p", "A");
+
+ try (var loader = createClassLoader(jar)) {
+ PluginBundle bundle = createMockBundle(pluginName, null, "p.A");
+ PluginsLoader mockPluginsLoader = mock(PluginsLoader.class);
+
+ when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, ModuleLayer.boot())));
+ PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader);
+
+ var testClass = loader.loadClass("p.A");
+ var resolvedPluginName = pluginsResolver.resolveClassToPluginName(testClass);
+ var unresolvedPluginName1 = pluginsResolver.resolveClassToPluginName(PluginsResolver.class);
+ var unresolvedPluginName2 = pluginsResolver.resolveClassToPluginName(String.class);
+
+ assertEquals(pluginName, resolvedPluginName);
+ assertNull(unresolvedPluginName1);
+ assertNull(unresolvedPluginName2);
+ }
+ }
+
+ private static URLClassLoader createClassLoader(Path jar) throws MalformedURLException {
+ return new URLClassLoader(new URL[] { jar.toUri().toURL() });
+ }
+
+ private static ModuleLayer createModuleLayer(String moduleName, Path... jars) {
+ var finder = ModuleFinder.of(jars);
+ Configuration cf = ModuleLayer.boot().configuration().resolve(finder, ModuleFinder.of(), Set.of(moduleName));
+ var moduleController = ModuleLayer.defineModulesWithOneLoader(
+ cf,
+ List.of(ModuleLayer.boot()),
+ ClassLoader.getPlatformClassLoader()
+ );
+ return moduleController.layer();
+ }
+
+ private static PluginBundle createMockBundle(String pluginName, String moduleName, String fqClassName) {
+ PluginDescriptor pd = new PluginDescriptor(
+ pluginName,
+ null,
+ null,
+ null,
+ null,
+ fqClassName,
+ moduleName,
+ List.of(),
+ false,
+ false,
+ true,
+ false
+ );
+
+ PluginBundle bundle = mock(PluginBundle.class);
+ when(bundle.pluginDescriptor()).thenReturn(pd);
+ return bundle;
+ }
+
+ private static Path createModularPluginJar(Path home, String pluginName, String moduleName, String packageName, String className)
+ throws IOException {
+ Path jar = home.resolve(pluginName + ".jar");
+ String fqClassName = packageName + "." + className;
+
+ Map sources = Map.ofEntries(
+ entry("module-info", "module " + moduleName + " { exports " + packageName + "; }"),
+ entry(fqClassName, "package " + packageName + "; public class " + className + " {}")
+ );
+
+ var classToBytes = InMemoryJavaCompiler.compile(sources);
+ JarUtils.createJarWithEntries(
+ jar,
+ Map.ofEntries(
+ entry("module-info.class", classToBytes.get("module-info")),
+ entry(packageName + "/" + className + ".class", classToBytes.get(fqClassName))
+ )
+ );
+ return jar;
+ }
+
+ private static Path createNonModularPluginJar(Path home, String pluginName, String packageName, String className) throws IOException {
+ Path jar = home.resolve(pluginName + ".jar");
+ String fqClassName = packageName + "." + className;
+
+ Map sources = Map.ofEntries(
+ entry(fqClassName, "package " + packageName + "; public class " + className + " {}")
+ );
+
+ var classToBytes = InMemoryJavaCompiler.compile(sources);
+ JarUtils.createJarWithEntries(jar, Map.ofEntries(entry(packageName + "/" + className + ".class", classToBytes.get(fqClassName))));
+ return jar;
+ }
+}
diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
index d2b6d0a6ec6d..afaa7a9a3288 100644
--- a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
+++ b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
@@ -11,6 +11,7 @@
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -53,8 +54,6 @@
import static java.time.Instant.ofEpochSecond;
import static java.time.ZonedDateTime.ofInstant;
-import static org.elasticsearch.TransportVersions.ZDT_NANOS_SUPPORT;
-import static org.elasticsearch.TransportVersions.ZDT_NANOS_SUPPORT_BROKEN;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasToString;
@@ -729,15 +728,11 @@ public void testReadAfterReachingEndOfStream() throws IOException {
}
public void testZonedDateTimeSerialization() throws IOException {
- checkZonedDateTimeSerialization(ZDT_NANOS_SUPPORT);
- }
-
- public void testZonedDateTimeMillisBwcSerializationV1() throws IOException {
- checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(ZDT_NANOS_SUPPORT_BROKEN));
+ checkZonedDateTimeSerialization(TransportVersions.V_8_16_0);
}
public void testZonedDateTimeMillisBwcSerialization() throws IOException {
- checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(ZDT_NANOS_SUPPORT));
+ checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_16_0));
}
public void checkZonedDateTimeSerialization(TransportVersion tv) throws IOException {
@@ -745,12 +740,12 @@ public void checkZonedDateTimeSerialization(TransportVersion tv) throws IOExcept
assertGenericRoundtrip(ofInstant(ofEpochSecond(1), randomZone()), tv);
// just want to test a large number that will use 5+ bytes
long maxEpochSecond = Integer.MAX_VALUE;
- long minEpochSecond = tv.between(ZDT_NANOS_SUPPORT_BROKEN, ZDT_NANOS_SUPPORT) ? 0 : Integer.MIN_VALUE;
+ long minEpochSecond = Integer.MIN_VALUE;
assertGenericRoundtrip(ofInstant(ofEpochSecond(maxEpochSecond), randomZone()), tv);
assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond)), randomZone()), tv);
assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 1_000_000), randomZone()), tv);
assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 999_000_000), randomZone()), tv);
- if (tv.onOrAfter(ZDT_NANOS_SUPPORT)) {
+ if (tv.onOrAfter(TransportVersions.V_8_16_0)) {
assertGenericRoundtrip(
ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 999_999_999), randomZone()),
tv
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java
index 55f6cc5498d8..4135ead545e0 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java
@@ -11,6 +11,9 @@
import org.elasticsearch.common.xcontent.BaseXContentTestCase;
import org.elasticsearch.xcontent.XContentGenerator;
+import org.elasticsearch.xcontent.XContentParseException;
+import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xcontent.json.JsonXContent;
@@ -28,4 +31,14 @@ public void testBigInteger() throws Exception {
XContentGenerator generator = JsonXContent.jsonXContent.createGenerator(os);
doTestBigInteger(generator, os);
}
+
+ public void testMalformedJsonFieldThrowsXContentException() throws Exception {
+ String json = "{\"test\":\"/*/}";
+ try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) {
+ parser.nextToken();
+ parser.nextToken();
+ parser.nextToken();
+ assertThrows(XContentParseException.class, () -> parser.text());
+ }
+ }
}
diff --git a/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java b/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java
index a7a1d33badf2..b2583eb176de 100644
--- a/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java
@@ -17,6 +17,7 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.ScoreMode;
+import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperServiceTestCase;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
@@ -27,12 +28,15 @@
import java.io.IOException;
import java.util.Collections;
+import static java.util.Collections.emptyMap;
import static org.mockito.Mockito.mock;
public class NestedHelperTests extends MapperServiceTestCase {
MapperService mapperService;
+ SearchExecutionContext searchExecutionContext;
+
@Override
public void setUp() throws Exception {
super.setUp();
@@ -68,167 +72,185 @@ public void setUp() throws Exception {
} }
""";
mapperService = createMapperService(mapping);
- }
-
- private static NestedHelper buildNestedHelper(MapperService mapperService) {
- return new NestedHelper(mapperService.mappingLookup().nestedLookup(), field -> mapperService.fieldType(field) != null);
+ searchExecutionContext = new SearchExecutionContext(
+ 0,
+ 0,
+ mapperService.getIndexSettings(),
+ null,
+ null,
+ mapperService,
+ mapperService.mappingLookup(),
+ null,
+ null,
+ parserConfig(),
+ writableRegistry(),
+ null,
+ null,
+ System::currentTimeMillis,
+ null,
+ null,
+ () -> true,
+ null,
+ emptyMap(),
+ MapperMetrics.NOOP
+ );
}
public void testMatchAll() {
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(new MatchAllDocsQuery()));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(new MatchAllDocsQuery(), searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(new MatchAllDocsQuery(), "nested_missing", searchExecutionContext));
}
public void testMatchNo() {
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(new MatchNoDocsQuery()));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested1"));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested2"));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested3"));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(new MatchNoDocsQuery(), searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested1", searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested2", searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested3", searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(new MatchNoDocsQuery(), "nested_missing", searchExecutionContext));
}
public void testTermsQuery() {
Query termsQuery = mapperService.fieldType("foo").termsQuery(Collections.singletonList("bar"), null);
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(termsQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(termsQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested_missing", searchExecutionContext));
termsQuery = mapperService.fieldType("nested1.foo").termsQuery(Collections.singletonList("bar"), null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termsQuery));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termsQuery, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested_missing", searchExecutionContext));
termsQuery = mapperService.fieldType("nested2.foo").termsQuery(Collections.singletonList("bar"), null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termsQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termsQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested_missing", searchExecutionContext));
termsQuery = mapperService.fieldType("nested3.foo").termsQuery(Collections.singletonList("bar"), null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termsQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termsQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termsQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termsQuery, "nested_missing", searchExecutionContext));
}
public void testTermQuery() {
Query termQuery = mapperService.fieldType("foo").termQuery("bar", null);
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(termQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(termQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested_missing", searchExecutionContext));
termQuery = mapperService.fieldType("nested1.foo").termQuery("bar", null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termQuery));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termQuery, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested_missing", searchExecutionContext));
termQuery = mapperService.fieldType("nested2.foo").termQuery("bar", null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested_missing", searchExecutionContext));
termQuery = mapperService.fieldType("nested3.foo").termQuery("bar", null);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(termQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(termQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(termQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(termQuery, "nested_missing", searchExecutionContext));
}
public void testRangeQuery() {
SearchExecutionContext context = mock(SearchExecutionContext.class);
Query rangeQuery = mapperService.fieldType("foo2").rangeQuery(2, 5, true, true, null, null, null, context);
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(rangeQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(rangeQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested_missing", searchExecutionContext));
rangeQuery = mapperService.fieldType("nested1.foo2").rangeQuery(2, 5, true, true, null, null, null, context);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(rangeQuery));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(rangeQuery, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested_missing", searchExecutionContext));
rangeQuery = mapperService.fieldType("nested2.foo2").rangeQuery(2, 5, true, true, null, null, null, context);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(rangeQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(rangeQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested_missing", searchExecutionContext));
rangeQuery = mapperService.fieldType("nested3.foo2").rangeQuery(2, 5, true, true, null, null, null, context);
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(rangeQuery));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(rangeQuery, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(rangeQuery, "nested_missing", searchExecutionContext));
}
public void testDisjunction() {
BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
.add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
.build();
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested1.foo", "bar")), Occur.SHOULD)
.add(new TermQuery(new Term("nested1.foo", "baz")), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested2.foo", "bar")), Occur.SHOULD)
.add(new TermQuery(new Term("nested2.foo", "baz")), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested2"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested2", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested3.foo", "bar")), Occur.SHOULD)
.add(new TermQuery(new Term("nested3.foo", "baz")), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested3"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested3", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested1.foo", "bar")), Occur.SHOULD)
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested2.foo", "bar")), Occur.SHOULD)
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested2"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested2", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested3.foo", "bar")), Occur.SHOULD)
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested3"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested3", searchExecutionContext));
}
private static Occur requiredOccur() {
@@ -239,42 +261,42 @@ public void testConjunction() {
BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), requiredOccur())
.add(new MatchAllDocsQuery(), requiredOccur())
.build();
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested1.foo", "bar")), requiredOccur())
.add(new MatchAllDocsQuery(), requiredOccur())
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertFalse(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertFalse(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested2.foo", "bar")), requiredOccur())
.add(new MatchAllDocsQuery(), requiredOccur())
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested2"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested2", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new TermQuery(new Term("nested3.foo", "bar")), requiredOccur())
.add(new MatchAllDocsQuery(), requiredOccur())
.build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested3"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested3", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), requiredOccur()).add(new MatchAllDocsQuery(), requiredOccur()).build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), requiredOccur()).add(new MatchAllDocsQuery(), requiredOccur()).build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested1"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested1", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), requiredOccur()).add(new MatchAllDocsQuery(), requiredOccur()).build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested2"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested2", searchExecutionContext));
bq = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), requiredOccur()).add(new MatchAllDocsQuery(), requiredOccur()).build();
- assertTrue(buildNestedHelper(mapperService).mightMatchNestedDocs(bq));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(bq, "nested3"));
+ assertTrue(NestedHelper.mightMatchNestedDocs(bq, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(bq, "nested3", searchExecutionContext));
}
public void testNested() throws IOException {
@@ -288,11 +310,11 @@ public void testNested() throws IOException {
.build();
assertEquals(expectedChildQuery, query.getChildQuery());
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(query));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(query, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested_missing", searchExecutionContext));
queryBuilder = new NestedQueryBuilder("nested1", new TermQueryBuilder("nested1.foo", "bar"), ScoreMode.Avg);
query = (ESToParentBlockJoinQuery) queryBuilder.toQuery(context);
@@ -301,11 +323,11 @@ public void testNested() throws IOException {
expectedChildQuery = new TermQuery(new Term("nested1.foo", "bar"));
assertEquals(expectedChildQuery, query.getChildQuery());
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(query));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(query, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested_missing", searchExecutionContext));
queryBuilder = new NestedQueryBuilder("nested2", new TermQueryBuilder("nested2.foo", "bar"), ScoreMode.Avg);
query = (ESToParentBlockJoinQuery) queryBuilder.toQuery(context);
@@ -316,11 +338,11 @@ public void testNested() throws IOException {
.build();
assertEquals(expectedChildQuery, query.getChildQuery());
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(query));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(query, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested_missing", searchExecutionContext));
queryBuilder = new NestedQueryBuilder("nested3", new TermQueryBuilder("nested3.foo", "bar"), ScoreMode.Avg);
query = (ESToParentBlockJoinQuery) queryBuilder.toQuery(context);
@@ -331,10 +353,10 @@ public void testNested() throws IOException {
.build();
assertEquals(expectedChildQuery, query.getChildQuery());
- assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(query));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested1"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested2"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested3"));
- assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(query, "nested_missing"));
+ assertFalse(NestedHelper.mightMatchNestedDocs(query, searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested1", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested2", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested3", searchExecutionContext));
+ assertTrue(NestedHelper.mightMatchNonNestedDocs(query, "nested_missing", searchExecutionContext));
}
}
diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
index d1ccfcbe7873..89fd25f638e1 100644
--- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
@@ -95,7 +95,6 @@
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.PointInTimeBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.search.collapse.CollapseBuilder;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
@@ -124,7 +123,6 @@
import org.elasticsearch.search.rank.feature.RankFeatureResult;
import org.elasticsearch.search.rank.feature.RankFeatureShardRequest;
import org.elasticsearch.search.rank.feature.RankFeatureShardResult;
-import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.tasks.TaskCancelHelper;
import org.elasticsearch.tasks.TaskCancelledException;
@@ -2930,119 +2928,6 @@ public void testSlicingBehaviourForParallelCollection() throws Exception {
}
}
- /**
- * This method tests validation that happens on the data nodes, which is now performed on the coordinating node.
- * We still need the validation to cover for mixed cluster scenarios where the coordinating node does not perform the check yet.
- */
- public void testParseSourceValidation() {
- String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT);
- IndexService indexService = createIndex(index);
- final SearchService service = getInstanceFromNode(SearchService.class);
- {
- // scroll and search_after
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.scroll(new TimeValue(1000));
- searchRequest.source().searchAfter(new String[] { "value" });
- assertCreateContextValidation(searchRequest, "`search_after` cannot be used in a scroll context.", indexService, service);
- }
- {
- // scroll and collapse
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.scroll(new TimeValue(1000));
- searchRequest.source().collapse(new CollapseBuilder("field"));
- assertCreateContextValidation(searchRequest, "cannot use `collapse` in a scroll context", indexService, service);
- }
- {
- // search_after and `from` isn't valid
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.source().searchAfter(new String[] { "value" });
- searchRequest.source().from(10);
- assertCreateContextValidation(
- searchRequest,
- "`from` parameter must be set to 0 when `search_after` is used",
- indexService,
- service
- );
- }
- {
- // slice without scroll or pit
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.source().slice(new SliceBuilder(1, 10));
- assertCreateContextValidation(
- searchRequest,
- "[slice] can only be used with [scroll] or [point-in-time] requests",
- indexService,
- service
- );
- }
- {
- // stored fields disabled with _source requested
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.source().storedField("_none_");
- searchRequest.source().fetchSource(true);
- assertCreateContextValidation(
- searchRequest,
- "[stored_fields] cannot be disabled if [_source] is requested",
- indexService,
- service
- );
- }
- {
- // stored fields disabled with fetch fields requested
- SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
- searchRequest.source().storedField("_none_");
- searchRequest.source().fetchSource(false);
- searchRequest.source().fetchField("field");
- assertCreateContextValidation(
- searchRequest,
- "[stored_fields] cannot be disabled when using the [fields] option",
- indexService,
- service
- );
- }
- }
-
- private static void assertCreateContextValidation(
- SearchRequest searchRequest,
- String errorMessage,
- IndexService indexService,
- SearchService searchService
- ) {
- ShardId shardId = new ShardId(indexService.index(), 0);
- long nowInMillis = System.currentTimeMillis();
- String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10);
- searchRequest.allowPartialSearchResults(randomBoolean());
- ShardSearchRequest request = new ShardSearchRequest(
- OriginalIndices.NONE,
- searchRequest,
- shardId,
- 0,
- indexService.numberOfShards(),
- AliasFilter.EMPTY,
- 1f,
- nowInMillis,
- clusterAlias
- );
-
- SearchShardTask task = new SearchShardTask(1, "type", "action", "description", null, emptyMap());
-
- ReaderContext readerContext = null;
- try {
- ReaderContext createOrGetReaderContext = searchService.createOrGetReaderContext(request);
- readerContext = createOrGetReaderContext;
- IllegalArgumentException exception = expectThrows(
- IllegalArgumentException.class,
- () -> searchService.createContext(createOrGetReaderContext, request, task, ResultsType.QUERY, randomBoolean())
- );
- assertThat(exception.getMessage(), containsString(errorMessage));
- } finally {
- if (readerContext != null) {
- readerContext.close();
- searchService.freeReaderContext(readerContext.id());
- }
- }
- }
-
private static ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) {
return new ReaderContext(
new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()),
diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java
index af6782c45dce..ccf33c0b71b6 100644
--- a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java
@@ -95,12 +95,7 @@ private List preFilters(QueryRewriteContext queryRewriteContext) t
}
private RankDocsRetrieverBuilder createRandomRankDocsRetrieverBuilder(QueryRewriteContext queryRewriteContext) throws IOException {
- return new RankDocsRetrieverBuilder(
- randomIntBetween(1, 100),
- innerRetrievers(queryRewriteContext),
- rankDocsSupplier(),
- preFilters(queryRewriteContext)
- );
+ return new RankDocsRetrieverBuilder(randomIntBetween(1, 100), innerRetrievers(queryRewriteContext), rankDocsSupplier());
}
public void testExtractToSearchSourceBuilder() throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java
index c47c8c16f6a2..5733a51bb7e9 100644
--- a/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java
+++ b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java
@@ -27,9 +27,9 @@
/**
* A SearchPlugin to exercise query vector builder
*/
-class TestQueryVectorBuilderPlugin implements SearchPlugin {
+public class TestQueryVectorBuilderPlugin implements SearchPlugin {
- static class TestQueryVectorBuilder implements QueryVectorBuilder {
+ public static class TestQueryVectorBuilder implements QueryVectorBuilder {
private static final String NAME = "test_query_vector_builder";
private static final ParseField QUERY_VECTOR = new ParseField("query_vector");
@@ -47,11 +47,11 @@ static class TestQueryVectorBuilder implements QueryVectorBuilder {
private List vectorToBuild;
- TestQueryVectorBuilder(List vectorToBuild) {
+ public TestQueryVectorBuilder(List vectorToBuild) {
this.vectorToBuild = vectorToBuild;
}
- TestQueryVectorBuilder(float[] expected) {
+ public TestQueryVectorBuilder(float[] expected) {
this.vectorToBuild = new ArrayList<>(expected.length);
for (float f : expected) {
vectorToBuild.add(f);
diff --git a/settings.gradle b/settings.gradle
index 4722fc311480..747fbb3e439f 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -73,6 +73,7 @@ List projects = [
'distribution:packages:aarch64-rpm',
'distribution:packages:rpm',
'distribution:bwc:bugfix',
+ 'distribution:bwc:bugfix2',
'distribution:bwc:maintenance',
'distribution:bwc:minor',
'distribution:bwc:staged',
diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java
index a9a825af3b86..91875600ec00 100644
--- a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java
+++ b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java
@@ -45,7 +45,7 @@ public MockPluginsService(Settings settings, Environment environment, Collection
super(
settings,
environment.configFile(),
- new PluginsLoader(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap())
+ new PluginsLoader(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap(), Collections.emptySet())
);
List pluginsLoaded = new ArrayList<>();
diff --git a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestCompoundRetrieverBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestCompoundRetrieverBuilder.java
index 9f199aa7f3ef..4a5f280c10a9 100644
--- a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestCompoundRetrieverBuilder.java
+++ b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestCompoundRetrieverBuilder.java
@@ -10,6 +10,7 @@
package org.elasticsearch.search.retriever;
import org.apache.lucene.search.ScoreDoc;
+import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.rank.RankDoc;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -23,16 +24,17 @@ public class TestCompoundRetrieverBuilder extends CompoundRetrieverBuilder(), rankWindowSize);
+ this(new ArrayList<>(), rankWindowSize, new ArrayList<>());
}
- TestCompoundRetrieverBuilder(List childRetrievers, int rankWindowSize) {
+ TestCompoundRetrieverBuilder(List childRetrievers, int rankWindowSize, List preFilterQueryBuilders) {
super(childRetrievers, rankWindowSize);
+ this.preFilterQueryBuilders = preFilterQueryBuilders;
}
@Override
- protected TestCompoundRetrieverBuilder clone(List newChildRetrievers) {
- return new TestCompoundRetrieverBuilder(newChildRetrievers, rankWindowSize);
+ protected TestCompoundRetrieverBuilder clone(List newChildRetrievers, List newPreFilterQueryBuilders) {
+ return new TestCompoundRetrieverBuilder(newChildRetrievers, rankWindowSize, newPreFilterQueryBuilders);
}
@Override
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index b4f4243fb90f..4428afaaeabe 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -333,8 +333,11 @@ public void initClient() throws IOException {
assert testFeatureServiceInitialized() == false;
clusterHosts = parseClusterHosts(getTestRestCluster());
logger.info("initializing REST clients against {}", clusterHosts);
- client = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()]));
- adminClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()]));
+ var clientSettings = restClientSettings();
+ var adminSettings = restAdminSettings();
+ var hosts = clusterHosts.toArray(new HttpHost[0]);
+ client = buildClient(clientSettings, hosts);
+ adminClient = clientSettings.equals(adminSettings) ? client : buildClient(adminSettings, hosts);
availableFeatures = EnumSet.of(ProductFeature.LEGACY_TEMPLATES);
Set versions = new HashSet<>();
diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java
index 0d42a2856a10..85510c8a989c 100644
--- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java
+++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java
@@ -37,9 +37,6 @@ public class InternalMultiTerms extends AbstractInternalTerms {
-
- long bucketOrd;
-
protected long docCount;
protected InternalAggregations aggregations;
private long docCountError;
diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java
index 1691aedf543f..5c10e2c8feeb 100644
--- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java
+++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java
@@ -20,6 +20,7 @@
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
@@ -40,6 +41,7 @@
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.bucket.DeferableBucketAggregator;
+import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd;
import org.elasticsearch.search.aggregations.bucket.terms.BucketPriorityQueue;
import org.elasticsearch.search.aggregations.bucket.terms.BytesKeyedBucketOrds;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
@@ -72,7 +74,7 @@ class MultiTermsAggregator extends DeferableBucketAggregator {
protected final List formats;
protected final TermsAggregator.BucketCountThresholds bucketCountThresholds;
protected final BucketOrder order;
- protected final Comparator partiallyBuiltBucketComparator;
+ protected final Comparator> partiallyBuiltBucketComparator;
protected final Set aggsUsedForSorting;
protected final SubAggCollectionMode collectMode;
private final List values;
@@ -99,7 +101,7 @@ protected MultiTermsAggregator(
super(name, factories, context, parent, metadata);
this.bucketCountThresholds = bucketCountThresholds;
this.order = order;
- partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this);
+ partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(this);
this.formats = formats;
this.showTermDocCountError = showTermDocCountError;
if (subAggsNeedScore() && descendsFromNestedAggregator(parent) || context.isInSortOrderExecutionRequired()) {
@@ -242,52 +244,67 @@ public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throw
LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true);
ObjectArray topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size())
) {
- for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
- final long owningBucketOrd = owningBucketOrds.get(ordIdx);
- long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd);
-
- int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize());
- try (
- ObjectArrayPriorityQueue ordered = new BucketPriorityQueue<>(
- size,
- bigArrays(),
- partiallyBuiltBucketComparator
- )
- ) {
- InternalMultiTerms.Bucket spare = null;
- BytesRef spareKey = null;
- BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
- while (ordsEnum.next()) {
- long docCount = bucketDocCount(ordsEnum.ord());
- otherDocCounts.increment(ordIdx, docCount);
- if (docCount < bucketCountThresholds.getShardMinDocCount()) {
- continue;
- }
- if (spare == null) {
- checkRealMemoryCBForInternalBucket();
- spare = new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters);
- spareKey = new BytesRef();
- }
- ordsEnum.readValue(spareKey);
- spare.terms = unpackTerms(spareKey);
- spare.docCount = docCount;
- spare.bucketOrd = ordsEnum.ord();
- spare = ordered.insertWithOverflow(spare);
- }
+ try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) {
+ long ordsToCollect = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds.get(ordIdx)), bucketCountThresholds.getShardSize());
+ ordsToCollect += size;
+ bucketsToCollect.set(ordIdx, size);
+ }
+ try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) {
+ long ordsCollected = 0;
+ for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
+ final long owningBucketOrd = owningBucketOrds.get(ordIdx);
+ long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd);
+
+ int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize());
+ try (
+ ObjectArrayPriorityQueue> ordered = new BucketPriorityQueue<>(
+ size,
+ bigArrays(),
+ partiallyBuiltBucketComparator
+ )
+ ) {
+ BucketAndOrd spare = null;
+ BytesRef spareKey = null;
+ BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
+ while (ordsEnum.next()) {
+ long docCount = bucketDocCount(ordsEnum.ord());
+ otherDocCounts.increment(ordIdx, docCount);
+ if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+ continue;
+ }
+ if (spare == null) {
+ checkRealMemoryCBForInternalBucket();
+ spare = new BucketAndOrd<>(
+ new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters)
+ );
+ spareKey = new BytesRef();
+ }
+ ordsEnum.readValue(spareKey);
+ spare.bucket.terms = unpackTerms(spareKey);
+ spare.bucket.docCount = docCount;
+ spare.ord = ordsEnum.ord();
+ spare = ordered.insertWithOverflow(spare);
+ }
- // Get the top buckets
- InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[(int) ordered.size()];
- topBucketsPerOrd.set(ordIdx, bucketsForOrd);
- for (int b = (int) ordered.size() - 1; b >= 0; --b) {
- InternalMultiTerms.Bucket[] buckets = topBucketsPerOrd.get(ordIdx);
- buckets[b] = ordered.pop();
- otherDocCounts.increment(ordIdx, -buckets[b].getDocCount());
+ // Get the top buckets
+ int orderedSize = (int) ordered.size();
+ InternalMultiTerms.Bucket[] buckets = new InternalMultiTerms.Bucket[orderedSize];
+ for (int i = orderedSize - 1; i >= 0; --i) {
+ BucketAndOrd bucketAndOrd = ordered.pop();
+ buckets[i] = bucketAndOrd.bucket;
+ ordsArray.set(ordsCollected + i, bucketAndOrd.ord);
+ otherDocCounts.increment(ordIdx, -buckets[i].getDocCount());
+ }
+ topBucketsPerOrd.set(ordIdx, buckets);
+ ordsCollected += orderedSize;
+ }
}
+ buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, a) -> b.aggregations = a);
}
}
- buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a);
-
return buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> buildResult(otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx))
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
index 7a31888a440c..a61a86eea710 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
@@ -111,7 +111,7 @@ public LifecycleStats(
}
public static LifecycleStats read(StreamInput in) throws IOException {
- if (in.getTransportVersion().onOrAfter(TransportVersions.GLOBAL_RETENTION_TELEMETRY)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
return new LifecycleStats(
in.readVLong(),
in.readBoolean(),
@@ -139,7 +139,7 @@ public static LifecycleStats read(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
- if (out.getTransportVersion().onOrAfter(TransportVersions.GLOBAL_RETENTION_TELEMETRY)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeVLong(dataStreamsWithLifecyclesCount);
out.writeBoolean(defaultRolloverUsed);
dataRetentionStats.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java
index 0457de6edcc9..36322ed6c6cb 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java
@@ -209,7 +209,7 @@ public CacheStats(StreamInput in) throws IOException {
in.readVLong(),
in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0) ? in.readLong() : -1,
in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0) ? in.readLong() : -1,
- in.getTransportVersion().onOrAfter(TransportVersions.ENRICH_CACHE_STATS_SIZE_ADDED) ? in.readLong() : -1
+ in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readLong() : -1
);
}
@@ -237,7 +237,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeLong(hitsTimeInMillis);
out.writeLong(missesTimeInMillis);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.ENRICH_CACHE_STATS_SIZE_ADDED)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeLong(cacheSizeInBytes);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
index 33402671a223..5d635c97d9c8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
@@ -328,7 +328,7 @@ public IndexLifecycleExplainResponse(StreamInput in) throws IOException {
} else {
indexCreationDate = null;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.RETAIN_ILM_STEP_INFO)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
previousStepInfo = in.readOptionalBytesReference();
} else {
previousStepInfo = null;
@@ -379,7 +379,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
out.writeOptionalLong(indexCreationDate);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.RETAIN_ILM_STEP_INFO)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalBytesReference(previousStepInfo);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java
index c06dcc0f083d..da64df2672bd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java
@@ -8,6 +8,7 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import org.elasticsearch.TransportVersions;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexAbstraction;
@@ -32,7 +33,6 @@
import java.util.List;
import java.util.Objects;
-import static org.elasticsearch.TransportVersions.ILM_ADD_SEARCHABLE_SNAPSHOT_TOTAL_SHARDS_PER_NODE;
import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOTS_REPOSITORY_NAME_SETTING_KEY;
import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOTS_SNAPSHOT_NAME_SETTING_KEY;
import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY;
@@ -102,9 +102,7 @@ public SearchableSnapshotAction(String snapshotRepository) {
public SearchableSnapshotAction(StreamInput in) throws IOException {
this.snapshotRepository = in.readString();
this.forceMergeIndex = in.readBoolean();
- this.totalShardsPerNode = in.getTransportVersion().onOrAfter(ILM_ADD_SEARCHABLE_SNAPSHOT_TOTAL_SHARDS_PER_NODE)
- ? in.readOptionalInt()
- : null;
+ this.totalShardsPerNode = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalInt() : null;
}
boolean isForceMergeIndex() {
@@ -424,7 +422,7 @@ public String getWriteableName() {
public void writeTo(StreamOutput out) throws IOException {
out.writeString(snapshotRepository);
out.writeBoolean(forceMergeIndex);
- if (out.getTransportVersion().onOrAfter(ILM_ADD_SEARCHABLE_SNAPSHOT_TOTAL_SHARDS_PER_NODE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalInt(totalShardsPerNode);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java
index 226fe3630b38..c3f991a8b4e1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java
@@ -127,7 +127,7 @@ public Response(StreamInput in) throws IOException {
pipelineIds = Set.of();
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
indexes = in.readCollectionAsSet(StreamInput::readString);
dryRunMessage = in.readOptionalString();
} else {
@@ -143,7 +143,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
out.writeCollection(pipelineIds, StreamOutput::writeString);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeCollection(indexes, StreamOutput::writeString);
out.writeOptionalString(dryRunMessage);
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java
index ea0462d0f103..ba3d417d0267 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java
@@ -63,7 +63,7 @@ public Request(StreamInput in) throws IOException {
this.inferenceEntityId = in.readString();
this.taskType = TaskType.fromStream(in);
if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)
- || in.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) {
+ || in.getTransportVersion().isPatchFrom(TransportVersions.V_8_16_0)) {
this.persistDefaultConfig = in.readBoolean();
} else {
this.persistDefaultConfig = PERSIST_DEFAULT_CONFIGS;
@@ -89,7 +89,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeString(inferenceEntityId);
taskType.writeTo(out);
if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)
- || out.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) {
+ || out.getTransportVersion().isPatchFrom(TransportVersions.V_8_16_0)) {
out.writeBoolean(this.persistDefaultConfig);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java
index 0645299dfc30..8c4611f05e72 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java
@@ -66,7 +66,7 @@ public MachineLearningFeatureSetUsage(StreamInput in) throws IOException {
this.analyticsUsage = in.readGenericMap();
this.inferenceUsage = in.readGenericMap();
this.nodeCount = in.readInt();
- if (in.getTransportVersion().onOrAfter(TransportVersions.ML_TELEMETRY_MEMORY_ADDED)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.memoryUsage = in.readGenericMap();
} else {
this.memoryUsage = Map.of();
@@ -86,7 +86,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeGenericMap(analyticsUsage);
out.writeGenericMap(inferenceUsage);
out.writeInt(nodeCount);
- if (out.getTransportVersion().onOrAfter(TransportVersions.ML_TELEMETRY_MEMORY_ADDED)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeGenericMap(memoryUsage);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java
index c6976ab4b513..2aedb4634753 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java
@@ -47,7 +47,7 @@ public Request(StartTrainedModelDeploymentAction.TaskParams taskParams, Adaptive
public Request(StreamInput in) throws IOException {
super(in);
this.taskParams = new StartTrainedModelDeploymentAction.TaskParams(in);
- if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new);
} else {
this.adaptiveAllocationsSettings = null;
@@ -63,7 +63,7 @@ public ActionRequestValidationException validate() {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
taskParams.writeTo(out);
- if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalWriteable(adaptiveAllocationsSettings);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
index b298d486c9e0..1bf92262b30f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
@@ -169,7 +169,7 @@ public Request(StreamInput in) throws IOException {
modelId = in.readString();
timeout = in.readTimeValue();
waitForState = in.readEnum(AllocationStatus.State.class);
- if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
numberOfAllocations = in.readOptionalVInt();
} else {
numberOfAllocations = in.readVInt();
@@ -189,7 +189,7 @@ public Request(StreamInput in) throws IOException {
} else {
this.deploymentId = modelId;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new);
} else {
this.adaptiveAllocationsSettings = null;
@@ -297,7 +297,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeString(modelId);
out.writeTimeValue(timeout);
out.writeEnum(waitForState);
- if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalVInt(numberOfAllocations);
} else {
out.writeVInt(numberOfAllocations);
@@ -313,7 +313,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
out.writeString(deploymentId);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalWriteable(adaptiveAllocationsSettings);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java
index cb578fdb157d..2018c9526ec8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java
@@ -87,7 +87,7 @@ public Request(String deploymentId) {
public Request(StreamInput in) throws IOException {
super(in);
deploymentId = in.readString();
- if (in.getTransportVersion().before(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (in.getTransportVersion().before(TransportVersions.V_8_16_0)) {
numberOfAllocations = in.readVInt();
adaptiveAllocationsSettings = null;
isInternal = false;
@@ -134,7 +134,7 @@ public AdaptiveAllocationsSettings getAdaptiveAllocationsSettings() {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(deploymentId);
- if (out.getTransportVersion().before(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) {
out.writeVInt(numberOfAllocations);
} else {
out.writeOptionalVInt(numberOfAllocations);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java
index b007c1da451f..742daa1bf613 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java
@@ -115,7 +115,7 @@ public ScheduledEvent(StreamInput in) throws IOException {
description = in.readString();
startTime = in.readInstant();
endTime = in.readInstant();
- if (in.getTransportVersion().onOrAfter(TransportVersions.ML_SCHEDULED_EVENT_TIME_SHIFT_CONFIGURATION)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
skipResult = in.readBoolean();
skipModelUpdate = in.readBoolean();
forceTimeShift = in.readOptionalInt();
@@ -204,7 +204,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeString(description);
out.writeInstant(startTime);
out.writeInstant(endTime);
- if (out.getTransportVersion().onOrAfter(TransportVersions.ML_SCHEDULED_EVENT_TIME_SHIFT_CONFIGURATION)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeBoolean(skipResult);
out.writeBoolean(skipModelUpdate);
out.writeOptionalInt(forceTimeShift);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java
index 858d97bf6f95..31b513eea161 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java
@@ -483,7 +483,7 @@ public AssignmentStats(StreamInput in) throws IOException {
} else {
deploymentId = modelId;
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new);
} else {
adaptiveAllocationsSettings = null;
@@ -666,7 +666,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
out.writeString(deploymentId);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalWriteable(adaptiveAllocationsSettings);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
index efd07cceae09..249e27d6f25e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
@@ -178,7 +178,7 @@ public TrainedModelAssignment(StreamInput in) throws IOException {
} else {
this.maxAssignedAllocations = totalCurrentAllocations();
}
- if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new);
} else {
this.adaptiveAllocationsSettings = null;
@@ -382,7 +382,7 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
out.writeVInt(maxAssignedAllocations);
}
- if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalWriteable(adaptiveAllocationsSettings);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java
index 9929e59a9c80..a4d7c9c7fa08 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java
@@ -41,7 +41,6 @@
public class LearningToRankConfig extends RegressionConfig implements Rewriteable<LearningToRankConfig> {
public static final ParseField NAME = new ParseField("learning_to_rank");
- static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersions.LTR_SERVERLESS_RELEASE;
public static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values");
public static final ParseField FEATURE_EXTRACTORS = new ParseField("feature_extractors");
public static final ParseField DEFAULT_PARAMS = new ParseField("default_params");
@@ -226,7 +225,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() {
@Override
public TransportVersion getMinimalSupportedTransportVersion() {
- return MIN_SUPPORTED_TRANSPORT_VERSION;
+ return TransportVersions.V_8_16_0;
}
@Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java
index eb952a7dc7e5..4bdced325311 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java
@@ -68,7 +68,7 @@ public DetectionRule(StreamInput in) throws IOException {
actions = in.readEnumSet(RuleAction.class);
scope = new RuleScope(in);
conditions = in.readCollectionAsList(RuleCondition::new);
- if (in.getTransportVersion().onOrAfter(TransportVersions.ML_ADD_DETECTION_RULE_PARAMS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
params = new RuleParams(in);
} else {
params = new RuleParams();
@@ -80,7 +80,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeEnumSet(actions);
scope.writeTo(out);
out.writeCollection(conditions);
- if (out.getTransportVersion().onOrAfter(TransportVersions.ML_ADD_DETECTION_RULE_PARAMS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
params.writeTo(out);
}
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java
index 14ecf4cb0d6e..24f0a5243620 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java
@@ -160,10 +160,8 @@ private static void buildRoleQuery(
failIfQueryUsesClient(queryBuilder, context);
Query roleQuery = context.toQuery(queryBuilder).query();
filter.add(roleQuery, SHOULD);
- NestedLookup nestedLookup = context.nestedLookup();
- if (nestedLookup != NestedLookup.EMPTY) {
- NestedHelper nestedHelper = new NestedHelper(nestedLookup, context::isFieldMapped);
- if (nestedHelper.mightMatchNestedDocs(roleQuery)) {
+ if (context.nestedLookup() != NestedLookup.EMPTY) {
+ if (NestedHelper.mightMatchNestedDocs(roleQuery, context)) {
roleQuery = new BooleanQuery.Builder().add(roleQuery, FILTER)
.add(Queries.newNonNestedFilter(context.indexVersionCreated()), FILTER)
.build();
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java
index b93aa079a28d..148fdf21fd2d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java
@@ -82,7 +82,7 @@ public static ConfigurableClusterPrivilege[] readArray(StreamInput in) throws IO
* Utility method to write an array of {@link ConfigurableClusterPrivilege} objects to a {@link StreamOutput}
*/
public static void writeArray(StreamOutput out, ConfigurableClusterPrivilege[] privileges) throws IOException {
- if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeArray(WRITER, privileges);
} else {
out.writeArray(
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsageTests.java
index 87d658c6f983..e9ec8dfe8ee5 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsageTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsageTests.java
@@ -57,7 +57,7 @@ protected MachineLearningFeatureSetUsage mutateInstance(MachineLearningFeatureSe
@Override
protected MachineLearningFeatureSetUsage mutateInstanceForVersion(MachineLearningFeatureSetUsage instance, TransportVersion version) {
- if (version.before(TransportVersions.ML_TELEMETRY_MEMORY_ADDED)) {
+ if (version.before(TransportVersions.V_8_16_0)) {
return new MachineLearningFeatureSetUsage(
instance.available(),
instance.enabled(),
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java
index 3a61c848d381..d694b2681ee8 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java
@@ -68,8 +68,7 @@ public QueryRulesetListItem(StreamInput in) throws IOException {
this.criteriaTypeToCountMap = Map.of();
}
TransportVersion streamTransportVersion = in.getTransportVersion();
- if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15)
- || streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
+ if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
|| streamTransportVersion.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) {
this.ruleTypeToCountMap = in.readMap(m -> in.readEnum(QueryRule.QueryRuleType.class), StreamInput::readInt);
} else {
@@ -104,8 +103,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeMap(criteriaTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt);
}
TransportVersion streamTransportVersion = out.getTransportVersion();
- if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15)
- || streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
+ if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
|| streamTransportVersion.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) {
out.writeMap(ruleTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt);
}
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java
index 54a89d061de3..5b27cc7a3e05 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java
@@ -110,12 +110,14 @@ public QueryRuleRetrieverBuilder(
Map<String, Object> matchCriteria,
List<RetrieverSource> retrieverSource,
int rankWindowSize,
- String retrieverName
+ String retrieverName,
+ List<QueryBuilder> preFilterQueryBuilders
) {
super(retrieverSource, rankWindowSize);
this.rulesetIds = rulesetIds;
this.matchCriteria = matchCriteria;
this.retrieverName = retrieverName;
+ this.preFilterQueryBuilders = preFilterQueryBuilders;
}
@Override
@@ -156,8 +158,15 @@ public void doToXContent(XContentBuilder builder, Params params) throws IOExcept
}
@Override
- protected QueryRuleRetrieverBuilder clone(List<RetrieverSource> newChildRetrievers) {
- return new QueryRuleRetrieverBuilder(rulesetIds, matchCriteria, newChildRetrievers, rankWindowSize, retrieverName);
+ protected QueryRuleRetrieverBuilder clone(List<RetrieverSource> newChildRetrievers, List<QueryBuilder> newPreFilterQueryBuilders) {
+ return new QueryRuleRetrieverBuilder(
+ rulesetIds,
+ matchCriteria,
+ newChildRetrievers,
+ rankWindowSize,
+ retrieverName,
+ newPreFilterQueryBuilders
+ );
}
@Override
diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java
index 27d5e240534b..c822dd123d3f 100644
--- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java
+++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java
@@ -59,8 +59,7 @@ protected ListQueryRulesetsAction.Response mutateInstanceForVersion(
ListQueryRulesetsAction.Response instance,
TransportVersion version
) {
- if (version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15)
- || version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
+ if (version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
|| version.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) {
return instance;
} else if (version.onOrAfter(QueryRulesetListItem.EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) {
diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionRequestBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionRequestBWCSerializingTests.java
index 7041de1106b5..8582ee1bd8d2 100644
--- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionRequestBWCSerializingTests.java
+++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionRequestBWCSerializingTests.java
@@ -51,6 +51,6 @@ protected TestQueryRulesetAction.Request mutateInstanceForVersion(TestQueryRules
@Override
protected List<TransportVersion> bwcVersions() {
- return getAllBWCVersions().stream().filter(v -> v.onOrAfter(TransportVersions.QUERY_RULE_TEST_API)).collect(Collectors.toList());
+ return getAllBWCVersions().stream().filter(v -> v.onOrAfter(TransportVersions.V_8_16_0)).collect(Collectors.toList());
}
}
diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionResponseBWCSerializingTests.java
index a6562fb7b52a..142310ac4033 100644
--- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionResponseBWCSerializingTests.java
+++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionResponseBWCSerializingTests.java
@@ -47,6 +47,6 @@ protected TestQueryRulesetAction.Response mutateInstanceForVersion(TestQueryRule
@Override
protected List<TransportVersion> bwcVersions() {
- return getAllBWCVersions().stream().filter(v -> v.onOrAfter(TransportVersions.QUERY_RULE_TEST_API)).collect(Collectors.toList());
+ return getAllBWCVersions().stream().filter(v -> v.onOrAfter(TransportVersions.V_8_16_0)).collect(Collectors.toList());
}
}
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/planner/ExpressionTranslators.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/planner/ExpressionTranslators.java
index 7836522c7713..468d076c1b7e 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/planner/ExpressionTranslators.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/planner/ExpressionTranslators.java
@@ -107,9 +107,7 @@ protected Query asQuery(Not not, TranslatorHandler handler) {
}
public static Query doTranslate(Not not, TranslatorHandler handler) {
- Query wrappedQuery = handler.asQuery(not.field());
- Query q = wrappedQuery.negate(not.source());
- return q;
+ return handler.asQuery(not.field()).negate(not.source());
}
}
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java
index a63571093ba5..d86cdb0de038 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java
@@ -32,6 +32,113 @@
import static org.elasticsearch.xpack.esql.core.util.PlanStreamInput.readCachedStringWithVersionCheck;
import static org.elasticsearch.xpack.esql.core.util.PlanStreamOutput.writeCachedStringWithVersionCheck;
+/**
+ * This enum represents data types the ES|QL query processing layer is able to
+ * interact with in some way. This includes fully representable types (e.g.
+ * {@link DataType#LONG}, numeric types which we promote (e.g. {@link DataType#SHORT})
+ * or fold into other types (e.g. {@link DataType#DATE_PERIOD}) early in the
+ * processing pipeline, types for internal use
+ * cases (e.g. {@link DataType#PARTIAL_AGG}), and types which the language
+ * doesn't support, but require special handling anyway (e.g.
+ * {@link DataType#OBJECT})
+ *
+ *
+ * <h2>Process for adding a new data type</h2>
+ * Note: it is not expected that all the following steps be done in a single PR.
+ * Use capabilities to gate tests as you go, and use as many PRs as you think
+ * appropriate. New data types are complex, and smaller PRs will make reviews
+ * easier.
+ *
+ *
+ * Create a new feature flag for the type in {@link EsqlCorePlugin}. We
+ * recommend developing the data type over a series of smaller PRs behind
+ * a feature flag; even for relatively simple data types.
+ *
+ * Add a capability to EsqlCapabilities related to the new type, and
+ * gated by the feature flag you just created. Again, using the feature
+ * flag is preferred over snapshot-only. As development progresses, you may
+ * need to add more capabilities related to the new type, e.g. for
+ * supporting specific functions. This is fine, and expected.
+ *
+ * Create a new CSV test file for the new type. You'll either need to
+ * create a new data file as well, or add values of the new type to
+ * an existing data file. See CsvTestDataLoader for creating a new data
+ * set.
+ *
+ * In the new CSV test file, start adding basic functionality tests.
+ * These should include reading and returning values, both from indexed data
+ * and from the ROW command. It should also include functions that support
+ * "every" type, such as Case or MvFirst.
+ *
+ * Add the new type to the CsvTestUtils#Type enum, if it isn't already
+ * there. You also need to modify CsvAssert to support reading values
+ * of the new type.
+ *
+ * At this point, the CSV tests should fail with a sensible ES|QL error
+ * message. Make sure they're failing in ES|QL, not in the test
+ * framework.
+ *
+ * Add the new data type to this enum. This will cause a bunch of
+ * compile errors for switch statements throughout the code. Resolve those
+ * as appropriate. That is the main way in which the new type will be tied
+ * into the framework.
+ *
+ * Add the new type to the {@link DataType#UNDER_CONSTRUCTION}
+ * collection. This is used by the test framework to disable some checks
+ * around how functions report their supported types, which would otherwise
+ * generate a lot of noise while the type is still in development.
+ *
+ * Add typed data generators to TestCaseSupplier, and make sure all
+ * functions that support the new type have tests for it.
+ *
+ * Work to support things all types should do. Equality and the
+ * "typeless" MV functions (MvFirst, MvLast, and MvCount) should work for
+ * most types. Case and Coalesce should also support all types.
+ * If the type has a natural ordering, make sure to test
+ * sorting and the other binary comparisons. Make sure these functions all
+ * have CSV tests that run against indexed data.
+ *
+ * Add conversion functions as appropriate. Almost all types should
+ * support ToString, and should have a "ToType" function that accepts a
+ * string. There may be other logical conversions depending on the nature
+ * of the type. Make sure to add the conversion function to the
+ * TYPE_TO_CONVERSION_FUNCTION map in EsqlDataTypeConverter. Make sure the
+ * conversion functions have CSV tests that run against indexed data.
+ *
+ * Support the new type in aggregations that are type independent.
+ * This includes Values, Count, and Count Distinct. Make sure there are
+ * CSV tests against indexed data for these.
+ *
+ * Support other functions and aggregations as appropriate, making sure
+ * to included CSV tests.
+ *
+ * Consider how the type will interact with other types. For example,
+ * if the new type is numeric, it may be good for it to be comparable with
+ * other numbers. Supporting this may require new logic in
+ * EsqlDataTypeConverter#commonType, individual function type checking, the
+ * verifier rules, or other places. We suggest starting with CSV tests and
+ * seeing where they fail.
+ *
+ * There are some additional steps that should be taken when removing the
+ * feature flag and getting ready for a release:
+ *
+ *
+ * Ensure the capabilities for this type are always enabled
+ *
+ *
+ * Remove the type from the {@link DataType#UNDER_CONSTRUCTION}
+ * collection
+ *
+ * Fix new test failures related to declared function types
+ *
+ *
+ * Make sure to run the full test suite locally via gradle to generate
+ * the function type tables and helper files with the new type. Ensure all
+ * the functions that support the type have appropriate docs for it.
+ *
+ * If appropriate, remove the type from the ESQL limitations list of
+ * unsupported types.
+ *
+ */
public enum DataType {
/**
* Fields of this type are unsupported by any functions and are always
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java
index 47dadcbb11de..73e2d5ec626a 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java
@@ -72,7 +72,7 @@ public EsField(StreamInput in) throws IOException {
private DataType readDataType(StreamInput in) throws IOException {
String name = readCachedStringWithVersionCheck(in);
- if (in.getTransportVersion().before(TransportVersions.ESQL_NESTED_UNSUPPORTED) && name.equalsIgnoreCase("NESTED")) {
+ if (in.getTransportVersion().before(TransportVersions.V_8_16_0) && name.equalsIgnoreCase("NESTED")) {
/*
* The "nested" data type existed in older versions of ESQL but was
* entirely used to filter mappings away. Those versions will still
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/CollectionUtils.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/CollectionUtils.java
index 8bfcf4ca5c40..ce0540687121 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/CollectionUtils.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/CollectionUtils.java
@@ -30,12 +30,8 @@ public static List combine(List extends T> left, List extends T> righ
}
List<T> list = new ArrayList<>(left.size() + right.size());
- if (left.isEmpty() == false) {
- list.addAll(left);
- }
- if (right.isEmpty() == false) {
- list.addAll(right);
- }
+ list.addAll(left);
+ list.addAll(right);
return list;
}
@@ -73,13 +69,6 @@ public static List combine(Collection extends T> left, T... entries) {
return list;
}
- public static int mapSize(int size) {
- if (size < 2) {
- return size + 1;
- }
- return (int) (size / 0.75f + 1f);
- }
-
@SafeVarargs
@SuppressWarnings("varargs")
public static <T> List<T> nullSafeList(T... entries) {
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java
index e8ccae342900..b570a50535a5 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java
@@ -52,7 +52,7 @@ public interface PlanStreamInput {
String readCachedString() throws IOException;
static String readCachedStringWithVersionCheck(StreamInput planStreamInput) throws IOException {
- if (planStreamInput.getTransportVersion().before(TransportVersions.ESQL_CACHED_STRING_SERIALIZATION)) {
+ if (planStreamInput.getTransportVersion().before(TransportVersions.V_8_16_0)) {
return planStreamInput.readString();
}
return ((PlanStreamInput) planStreamInput).readCachedString();
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java
index fb4af33d2fd6..a5afcb5fa29a 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java
@@ -37,7 +37,7 @@ public interface PlanStreamOutput {
void writeCachedString(String field) throws IOException;
static void writeCachedStringWithVersionCheck(StreamOutput planStreamOutput, String string) throws IOException {
- if (planStreamOutput.getTransportVersion().before(TransportVersions.ESQL_CACHED_STRING_SERIALIZATION)) {
+ if (planStreamOutput.getTransportVersion().before(TransportVersions.V_8_16_0)) {
planStreamOutput.writeString(string);
} else {
((PlanStreamOutput) planStreamOutput).writeCachedString(string);
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java
index 9338077a5557..f57f450c7ee3 100644
--- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java
@@ -219,7 +219,7 @@ public Status(long aggregationNanos, long aggregationFinishNanos, int pagesProce
protected Status(StreamInput in) throws IOException {
aggregationNanos = in.readVLong();
- if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_AGGREGATION_OPERATOR_STATUS_FINISH_NANOS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
aggregationFinishNanos = in.readOptionalVLong();
} else {
aggregationFinishNanos = null;
@@ -230,7 +230,7 @@ protected Status(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(aggregationNanos);
- if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_AGGREGATION_OPERATOR_STATUS_FINISH_NANOS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalVLong(aggregationFinishNanos);
}
out.writeVInt(pagesProcessed);
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java
index d98613f1817a..c071b5055df7 100644
--- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java
@@ -79,7 +79,7 @@ public DriverProfile(
}
public DriverProfile(StreamInput in) throws IOException {
- if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) {
+ if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
this.startMillis = in.readVLong();
this.stopMillis = in.readVLong();
} else {
@@ -101,7 +101,7 @@ public DriverProfile(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
- if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeVLong(startMillis);
out.writeVLong(stopMillis);
}
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java
index 01e9a73c4fb5..d8856ebedb80 100644
--- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java
@@ -76,7 +76,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
static final int RECORDS = 10;
public static DriverSleeps read(StreamInput in) throws IOException {
- if (in.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) {
+ if (in.getTransportVersion().before(TransportVersions.V_8_16_0)) {
return empty();
}
return new DriverSleeps(
@@ -88,7 +88,7 @@ public static DriverSleeps read(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
- if (out.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) {
+ if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) {
return;
}
out.writeMap(counts, StreamOutput::writeVLong);
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec
index 712cadf6d44f..584cde55080e 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec
@@ -120,6 +120,19 @@ left:keyword | client_ip:keyword | right:keyword | env:keyword
left | 172.21.0.5 | right | Development
;
+lookupIPFromRowWithShadowingKeepReordered
+required_capability: join_lookup_v4
+
+ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right"
+| EVAL client_ip = client_ip::keyword
+| LOOKUP JOIN clientips_lookup ON client_ip
+| KEEP right, env, client_ip
+;
+
+right:keyword | env:keyword | client_ip:keyword
+right | Development | 172.21.0.5
+;
+
lookupIPFromIndex
required_capability: join_lookup_v4
@@ -263,6 +276,24 @@ ignoreOrder:true;
2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 | Success
;
+lookupMessageFromIndexKeepReordered
+required_capability: join_lookup_v4
+
+FROM sample_data
+| LOOKUP JOIN message_types_lookup ON message
+| KEEP type, client_ip, event_duration, message
+;
+
+type:keyword | client_ip:ip | event_duration:long | message:keyword
+Success | 172.21.3.15 | 1756467 | Connected to 10.1.0.1
+Error | 172.21.3.15 | 5033755 | Connection error
+Error | 172.21.3.15 | 8268153 | Connection error
+Error | 172.21.3.15 | 725448 | Connection error
+Disconnected | 172.21.0.5 | 1232382 | Disconnected
+Success | 172.21.2.113 | 2764889 | Connected to 10.1.0.2
+Success | 172.21.2.162 | 3450233 | Connected to 10.1.0.3
+;
+
lookupMessageFromIndexStats
required_capability: join_lookup_v4
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java
index 147b13b36c44..00f53d31165b 100644
--- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java
@@ -18,6 +18,7 @@
import org.elasticsearch.client.internal.ClusterAdminClient;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
@@ -1648,6 +1649,44 @@ public void testMaxTruncationSizeSetting() {
}
}
+ public void testScriptField() throws Exception {
+ XContentBuilder mapping = JsonXContent.contentBuilder();
+ mapping.startObject();
+ {
+ mapping.startObject("runtime");
+ {
+ mapping.startObject("k1");
+ mapping.field("type", "long");
+ mapping.endObject();
+ mapping.startObject("k2");
+ mapping.field("type", "long");
+ mapping.endObject();
+ }
+ mapping.endObject();
+ {
+ mapping.startObject("properties");
+ mapping.startObject("meter").field("type", "double").endObject();
+ mapping.endObject();
+ }
+ }
+ mapping.endObject();
+ String sourceMode = randomBoolean() ? "stored" : "synthetic";
+ Settings.Builder settings = indexSettings(1, 0).put(indexSettings()).put("index.mapping.source.mode", sourceMode);
+ client().admin().indices().prepareCreate("test-script").setMapping(mapping).setSettings(settings).get();
+ for (int i = 0; i < 10; i++) {
+ index("test-script", Integer.toString(i), Map.of("k1", i, "k2", "b-" + i, "meter", 10000 * i));
+ }
+ refresh("test-script");
+ try (EsqlQueryResponse resp = run("FROM test-script | SORT k1 | LIMIT 10")) {
+ List