diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index 00af4d006d0ac..2bd91b7fe3739 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -48,7 +48,7 @@ steps:
timeout_in_minutes: 300
matrix:
setup:
- BWC_VERSION: ["7.17.18", "8.12.1", "8.13.0"]
+ BWC_VERSION: ["7.17.19", "8.12.2", "8.13.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
index 30d4f4486dad5..ed00a0655dbd8 100644
--- a/.buildkite/pipelines/periodic-packaging.yml
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -1121,6 +1121,22 @@ steps:
env:
BWC_VERSION: 7.17.18
+ - label: "{{matrix.image}} / 7.17.19 / packaging-tests-upgrade"
+ command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.19
+ timeout_in_minutes: 300
+ matrix:
+ setup:
+ image:
+ - rocky-8
+ - ubuntu-2004
+ agents:
+ provider: gcp
+ image: family/elasticsearch-{{matrix.image}}
+ machineType: custom-16-32768
+ buildDirectory: /dev/shm/bk
+ env:
+ BWC_VERSION: 7.17.19
+
- label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade"
command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0
timeout_in_minutes: 300
@@ -1841,6 +1857,22 @@ steps:
env:
BWC_VERSION: 8.12.1
+ - label: "{{matrix.image}} / 8.12.2 / packaging-tests-upgrade"
+ command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.2
+ timeout_in_minutes: 300
+ matrix:
+ setup:
+ image:
+ - rocky-8
+ - ubuntu-2004
+ agents:
+ provider: gcp
+ image: family/elasticsearch-{{matrix.image}}
+ machineType: custom-16-32768
+ buildDirectory: /dev/shm/bk
+ env:
+ BWC_VERSION: 8.12.2
+
- label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade"
command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0
timeout_in_minutes: 300
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index 44007272f8954..86dc3c216d060 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -682,6 +682,16 @@ steps:
buildDirectory: /dev/shm/bk
env:
BWC_VERSION: 7.17.18
+ - label: 7.17.19 / bwc
+ command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.19#bwcTest
+ timeout_in_minutes: 300
+ agents:
+ provider: gcp
+ image: family/elasticsearch-ubuntu-2004
+ machineType: n1-standard-32
+ buildDirectory: /dev/shm/bk
+ env:
+ BWC_VERSION: 7.17.19
- label: 8.0.0 / bwc
command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest
timeout_in_minutes: 300
@@ -1132,6 +1142,16 @@ steps:
buildDirectory: /dev/shm/bk
env:
BWC_VERSION: 8.12.1
+ - label: 8.12.2 / bwc
+ command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.2#bwcTest
+ timeout_in_minutes: 300
+ agents:
+ provider: gcp
+ image: family/elasticsearch-ubuntu-2004
+ machineType: n1-standard-32
+ buildDirectory: /dev/shm/bk
+ env:
+ BWC_VERSION: 8.12.2
- label: 8.13.0 / bwc
command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest
timeout_in_minutes: 300
diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json
index c4aa43c775b1e..de0212685a8a7 100644
--- a/.buildkite/pull-requests.json
+++ b/.buildkite/pull-requests.json
@@ -11,7 +11,7 @@
"set_commit_status": false,
"build_on_commit": true,
"build_on_comment": true,
- "trigger_comment_regex": "(run\\W+elasticsearch-ci.+)|(^\\s*(buildkite\\s*)?test\\s+this(\\s+please)?)",
+ "trigger_comment_regex": "(run\\W+elasticsearch-ci.+)|(^\\s*((buildkite|@elastic(search)?machine)\\s*)?test\\s+this(\\s+please)?)",
"cancel_intermediate_builds": true,
"cancel_intermediate_builds_on_comment": false
},
diff --git a/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap b/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap
index 6df8ca8b63438..50dea7a07e042 100644
--- a/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap
+++ b/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap
@@ -201,3 +201,111 @@ exports[`generatePipelines should generate correct pipeline when using a trigger
},
]
`;
+
+exports[`generatePipelines should generate correct pipelines with a non-docs change and @elasticmachine 1`] = `
+[
+ {
+ "name": "bwc-snapshots",
+ "pipeline": {
+ "steps": [
+ {
+ "group": "bwc-snapshots",
+ "steps": [
+ {
+ "agents": {
+ "buildDirectory": "/dev/shm/bk",
+ "image": "family/elasticsearch-ubuntu-2004",
+ "machineType": "custom-32-98304",
+ "provider": "gcp",
+ },
+ "command": ".ci/scripts/run-gradle.sh -Dignore.tests.seed v{{matrix.BWC_VERSION}}#bwcTest",
+ "env": {
+ "BWC_VERSION": "{{matrix.BWC_VERSION}}",
+ },
+ "label": "{{matrix.BWC_VERSION}} / bwc-snapshots",
+ "matrix": {
+ "setup": {
+ "BWC_VERSION": [
+ "7.17.14",
+ "8.10.3",
+ "8.11.0",
+ ],
+ },
+ },
+ "timeout_in_minutes": 300,
+ },
+ ],
+ },
+ ],
+ },
+ },
+ {
+ "name": "using-defaults",
+ "pipeline": {
+ "env": {
+ "CUSTOM_ENV_VAR": "value",
+ },
+ "steps": [
+ {
+ "command": "echo 'hello world'",
+ "label": "test-step",
+ },
+ ],
+ },
+ },
+]
+`;
+
+exports[`generatePipelines should generate correct pipelines with a non-docs change and @elasticsearchmachine 1`] = `
+[
+ {
+ "name": "bwc-snapshots",
+ "pipeline": {
+ "steps": [
+ {
+ "group": "bwc-snapshots",
+ "steps": [
+ {
+ "agents": {
+ "buildDirectory": "/dev/shm/bk",
+ "image": "family/elasticsearch-ubuntu-2004",
+ "machineType": "custom-32-98304",
+ "provider": "gcp",
+ },
+ "command": ".ci/scripts/run-gradle.sh -Dignore.tests.seed v{{matrix.BWC_VERSION}}#bwcTest",
+ "env": {
+ "BWC_VERSION": "{{matrix.BWC_VERSION}}",
+ },
+ "label": "{{matrix.BWC_VERSION}} / bwc-snapshots",
+ "matrix": {
+ "setup": {
+ "BWC_VERSION": [
+ "7.17.14",
+ "8.10.3",
+ "8.11.0",
+ ],
+ },
+ },
+ "timeout_in_minutes": 300,
+ },
+ ],
+ },
+ ],
+ },
+ },
+ {
+ "name": "using-defaults",
+ "pipeline": {
+ "env": {
+ "CUSTOM_ENV_VAR": "value",
+ },
+ "steps": [
+ {
+ "command": "echo 'hello world'",
+ "label": "test-step",
+ },
+ ],
+ },
+ },
+]
+`;
diff --git a/.buildkite/scripts/pull-request/pipeline.test.ts b/.buildkite/scripts/pull-request/pipeline.test.ts
index d0634752260e4..562f37abbae1f 100644
--- a/.buildkite/scripts/pull-request/pipeline.test.ts
+++ b/.buildkite/scripts/pull-request/pipeline.test.ts
@@ -13,11 +13,11 @@ describe("generatePipelines", () => {
});
// Helper for testing pipeline generations that should be the same when using the overall ci trigger comment "buildkite test this"
- const testWithTriggerCheck = (directory: string, changedFiles?: string[]) => {
+ const testWithTriggerCheck = (directory: string, changedFiles?: string[], comment = "buildkite test this") => {
const pipelines = generatePipelines(directory, changedFiles);
expect(pipelines).toMatchSnapshot();
- process.env["GITHUB_PR_TRIGGER_COMMENT"] = "buildkite test this";
+ process.env["GITHUB_PR_TRIGGER_COMMENT"] = comment;
const pipelinesWithTriggerComment = generatePipelines(directory, changedFiles);
expect(pipelinesWithTriggerComment).toEqual(pipelines);
};
@@ -42,4 +42,20 @@ describe("generatePipelines", () => {
const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["build.gradle"]);
expect(pipelines).toMatchSnapshot();
});
+
+ test("should generate correct pipelines with a non-docs change and @elasticmachine", () => {
+ testWithTriggerCheck(
+ `${import.meta.dir}/mocks/pipelines`,
+ ["build.gradle", "docs/README.asciidoc"],
+ "@elasticmachine test this please"
+ );
+ });
+
+ test("should generate correct pipelines with a non-docs change and @elasticsearchmachine", () => {
+ testWithTriggerCheck(
+ `${import.meta.dir}/mocks/pipelines`,
+ ["build.gradle", "docs/README.asciidoc"],
+ "@elasticsearchmachine test this please"
+ );
+ });
});
diff --git a/.buildkite/scripts/pull-request/pipeline.ts b/.buildkite/scripts/pull-request/pipeline.ts
index 65aec47fe3cc8..6cb0e5d76b74b 100644
--- a/.buildkite/scripts/pull-request/pipeline.ts
+++ b/.buildkite/scripts/pull-request/pipeline.ts
@@ -148,7 +148,9 @@ export const generatePipelines = (
// However, if we're using the overall CI trigger "[buildkite] test this [please]", we should use the regular filters above
if (
process.env["GITHUB_PR_TRIGGER_COMMENT"] &&
- !process.env["GITHUB_PR_TRIGGER_COMMENT"].match(/^\s*(buildkite\s*)?test\s+this(\s+please)?/i)
+ !process.env["GITHUB_PR_TRIGGER_COMMENT"].match(
+ /^\s*((@elastic(search)?machine|buildkite)\s*)?test\s+this(\s+please)?/i
+ )
) {
filters = [triggerCommentCheck];
}
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 3871c6d06fd23..8ac1a60c9530c 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -67,6 +67,7 @@ BWC_VERSION:
- "7.17.16"
- "7.17.17"
- "7.17.18"
+ - "7.17.19"
- "8.0.0"
- "8.0.1"
- "8.1.0"
@@ -112,4 +113,5 @@ BWC_VERSION:
- "8.11.4"
- "8.12.0"
- "8.12.1"
+ - "8.12.2"
- "8.13.0"
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index 36c0eb5a2999c..079f3565880e4 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,4 +1,4 @@
BWC_VERSION:
- - "7.17.18"
- - "8.12.1"
+ - "7.17.19"
+ - "8.12.2"
- "8.13.0"
diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle
index 934d9f05d77a2..758cdf687e6b6 100644
--- a/build-tools-internal/build.gradle
+++ b/build-tools-internal/build.gradle
@@ -143,6 +143,10 @@ gradlePlugin {
id = 'elasticsearch.mrjar'
implementationClass = 'org.elasticsearch.gradle.internal.MrjarPlugin'
}
+ embeddedProvider {
+ id = 'elasticsearch.embedded-providers'
+ implementationClass = 'org.elasticsearch.gradle.internal.EmbeddedProviderPlugin'
+ }
releaseTools {
id = 'elasticsearch.release-tools'
implementationClass = 'org.elasticsearch.gradle.internal.release.ReleaseToolsPlugin'
diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle
index 683a2d5604055..b6996b7493f54 100644
--- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle
+++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle
@@ -114,7 +114,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') {
description = 'Builds artifacts needed as dependency for IDE modules'
dependsOn([':client:rest-high-level:shadowJar',
':plugins:repository-hdfs:hadoop-client-api:shadowJar',
- ':libs:elasticsearch-x-content:generateProviderImpl',
+ ':libs:elasticsearch-x-content:generateImplProviderImpl',
':x-pack:plugin:esql:compute:ann:jar',
':x-pack:plugin:esql:compute:gen:jar',
':server:generateModulesList',
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
index 4a695e93ebdfe..e224b16bf588e 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
@@ -26,14 +26,26 @@
import org.gradle.api.tasks.compile.CompileOptions;
import org.gradle.api.tasks.compile.GroovyCompile;
import org.gradle.api.tasks.compile.JavaCompile;
+import org.gradle.jvm.toolchain.JavaLanguageVersion;
+import org.gradle.jvm.toolchain.JavaToolchainService;
import java.util.List;
+import javax.inject.Inject;
+
/**
* A wrapper around Gradle's Java Base plugin that applies our
* common configuration for production code.
*/
public class ElasticsearchJavaBasePlugin implements Plugin<Project> {
+
+ private final JavaToolchainService javaToolchains;
+
+ @Inject
+ ElasticsearchJavaBasePlugin(JavaToolchainService javaToolchains) {
+ this.javaToolchains = javaToolchains;
+ }
+
@Override
public void apply(Project project) {
// make sure the global build info plugin is applied to the root project
@@ -103,7 +115,7 @@ private static void disableTransitiveDependenciesForSourceSet(Project project, S
/**
* Adds compiler settings to the project
*/
- public static void configureCompile(Project project) {
+ public void configureCompile(Project project) {
project.getExtensions().getExtraProperties().set("compactProfile", "full");
JavaPluginExtension java = project.getExtensions().getByType(JavaPluginExtension.class);
if (BuildParams.getJavaToolChainSpec().isPresent()) {
@@ -112,6 +124,10 @@ public static void configureCompile(Project project) {
java.setSourceCompatibility(BuildParams.getMinimumRuntimeVersion());
java.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion());
project.getTasks().withType(JavaCompile.class).configureEach(compileTask -> {
+ compileTask.getJavaCompiler().set(javaToolchains.compilerFor(spec -> {
+ spec.getLanguageVersion().set(JavaLanguageVersion.of(BuildParams.getMinimumRuntimeVersion().getMajorVersion()));
+ }));
+
CompileOptions compileOptions = compileTask.getOptions();
/*
* -path because gradle will send in paths that don't always exist.
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java
new file mode 100644
index 0000000000000..d3f79f7f76d4f
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal;
+
+import org.gradle.api.Project;
+import org.gradle.api.artifacts.Configuration;
+import org.gradle.api.file.Directory;
+import org.gradle.api.provider.Provider;
+import org.gradle.api.tasks.SourceSet;
+import org.gradle.api.tasks.Sync;
+
+import static org.elasticsearch.gradle.internal.conventions.GUtils.capitalize;
+import static org.elasticsearch.gradle.util.GradleUtils.getJavaSourceSets;
+import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE;
+import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.DIRECTORY_TYPE;
+
+public class EmbeddedProviderExtension {
+
+ private final Project project;
+
+ public EmbeddedProviderExtension(Project project) {
+ this.project = project;
+ }
+
+ void impl(String implName, Project implProject) {
+ String projectName = implProject.getName();
+ String capitalName = capitalize(projectName);
+
+ Configuration implConfig = project.getConfigurations().detachedConfiguration(project.getDependencies().create(implProject));
+ implConfig.attributes(attrs -> {
+ attrs.attribute(ARTIFACT_TYPE_ATTRIBUTE, DIRECTORY_TYPE);
+ attrs.attribute(EmbeddedProviderPlugin.IMPL_ATTR, true);
+ });
+
+ String manifestTaskName = "generate" + capitalName + "ProviderManifest";
+ Provider<Directory> generatedResourcesDir = project.getLayout().getBuildDirectory().dir("generated-resources");
+ var generateProviderManifest = project.getTasks().register(manifestTaskName, GenerateProviderManifest.class);
+ generateProviderManifest.configure(t -> {
+ t.getManifestFile().set(generatedResourcesDir.map(d -> d.file("LISTING.TXT")));
+ t.getProviderImplClasspath().from(implConfig);
+ });
+
+ String implTaskName = "generate" + capitalName + "ProviderImpl";
+ var generateProviderImpl = project.getTasks().register(implTaskName, Sync.class);
+ generateProviderImpl.configure(t -> {
+ t.into(generatedResourcesDir);
+ t.into("IMPL-JARS/" + implName, spec -> {
+ spec.from(implConfig);
+ spec.from(generateProviderManifest);
+ });
+ });
+
+ var mainSourceSet = getJavaSourceSets(project).findByName(SourceSet.MAIN_SOURCE_SET_NAME);
+ mainSourceSet.getOutput().dir(generateProviderImpl);
+ }
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderPlugin.java
new file mode 100644
index 0000000000000..213730139d915
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderPlugin.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal;
+
+import org.elasticsearch.gradle.transform.UnzipTransform;
+import org.gradle.api.Plugin;
+import org.gradle.api.Project;
+import org.gradle.api.attributes.Attribute;
+
+import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE;
+import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.DIRECTORY_TYPE;
+import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.JAR_TYPE;
+
+public class EmbeddedProviderPlugin implements Plugin<Project> {
+ static final Attribute<Boolean> IMPL_ATTR = Attribute.of("is.impl", Boolean.class);
+
+ @Override
+ public void apply(Project project) {
+
+ project.getDependencies().registerTransform(UnzipTransform.class, transformSpec -> {
+ transformSpec.getFrom().attribute(ARTIFACT_TYPE_ATTRIBUTE, JAR_TYPE).attribute(IMPL_ATTR, true);
+ transformSpec.getTo().attribute(ARTIFACT_TYPE_ATTRIBUTE, DIRECTORY_TYPE).attribute(IMPL_ATTR, true);
+ transformSpec.parameters(parameters -> parameters.getIncludeArtifactName().set(true));
+ });
+
+ project.getExtensions().create("embeddedProviders", EmbeddedProviderExtension.class, project);
+ }
+}
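For orientation, here is a hypothetical build-script usage of the new plugin and extension. The plugin id comes from the build-tools-internal/build.gradle change above; the provider name and project path are illustrative placeholders, not taken from this PR.

[source,groovy]
----
// Sketch: a consuming project applies the plugin and registers an embedded impl.
apply plugin: 'elasticsearch.embedded-providers'

embeddedProviders {
  // Calls EmbeddedProviderExtension.impl(String implName, Project implProject),
  // which registers the generate*ProviderManifest and generate*ProviderImpl tasks
  // and wires their output into the main source set's output.
  impl 'my-provider', project(':libs:my-library:impl')
}
----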
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java
index 8b21826447b46..8c5d671e00fe7 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java
@@ -11,12 +11,14 @@
import org.elasticsearch.gradle.util.GradleUtils;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
-import org.gradle.api.plugins.JavaLibraryPlugin;
+import org.gradle.api.file.FileCollection;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.api.plugins.JavaPluginExtension;
import org.gradle.api.tasks.SourceSet;
+import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.compile.CompileOptions;
import org.gradle.api.tasks.compile.JavaCompile;
+import org.gradle.api.tasks.testing.Test;
import org.gradle.jvm.tasks.Jar;
import org.gradle.jvm.toolchain.JavaLanguageVersion;
import org.gradle.jvm.toolchain.JavaToolchainService;
public class MrjarPlugin implements Plugin<Project> {
@Override
public void apply(Project project) {
- project.getPluginManager().apply(JavaLibraryPlugin.class);
+ project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class);
var javaExtension = project.getExtensions().getByType(JavaPluginExtension.class);
var srcDir = project.getProjectDir().toPath().resolve("src");
@@ -73,9 +75,19 @@ private void addMrjarSourceset(Project project, JavaPluginExtension javaExtensio
SourceSet sourceSet = javaExtension.getSourceSets().maybeCreate(sourcesetName);
GradleUtils.extendSourceSet(project, SourceSet.MAIN_SOURCE_SET_NAME, sourcesetName);
- project.getTasks().withType(Jar.class).named(JavaPlugin.JAR_TASK_NAME).configure(jarTask -> {
- jarTask.into("META-INF/versions/" + javaVersion, copySpec -> copySpec.from(sourceSet.getOutput()));
- jarTask.manifest(manifest -> { manifest.attributes(Map.of("Multi-Release", "true")); });
+ var jarTask = project.getTasks().withType(Jar.class).named(JavaPlugin.JAR_TASK_NAME);
+ jarTask.configure(task -> {
+ task.into("META-INF/versions/" + javaVersion, copySpec -> copySpec.from(sourceSet.getOutput()));
+ task.manifest(manifest -> { manifest.attributes(Map.of("Multi-Release", "true")); });
+ });
+
+ project.getTasks().withType(Test.class).named(JavaPlugin.TEST_TASK_NAME).configure(testTask -> {
+ testTask.dependsOn(jarTask);
+
+ SourceSetContainer sourceSets = GradleUtils.getJavaSourceSets(project);
+ FileCollection mainRuntime = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME).getRuntimeClasspath();
+ FileCollection testRuntime = sourceSets.getByName(SourceSet.TEST_SOURCE_SET_NAME).getRuntimeClasspath();
+ testTask.setClasspath(testRuntime.minus(mainRuntime).plus(project.files(jarTask)));
});
project.getTasks().withType(JavaCompile.class).named(sourceSet.getCompileJavaTaskName()).configure(compileTask -> {
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java
index bddf95cae77d4..0270ee22ca8c5 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java
@@ -101,7 +101,7 @@ private AdoptiumVersionInfo toVersionInfo(JsonNode node) {
private URI resolveDownloadURI(AdoptiumVersionRequest request, AdoptiumVersionInfo versionInfo) {
return URI.create(
"https://api.adoptium.net/v3/binary/version/jdk-"
- + versionInfo.openjdkVersion
+ + versionInfo.semver
+ "/"
+ request.platform
+ "/"
diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy
index 7b8129f8dbaec..6383d577f027f 100644
--- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy
+++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy
@@ -42,7 +42,7 @@ class AdoptiumJdkToolchainResolverSpec extends AbstractToolchainResolverSpec {
1,
1,
"" + languageVersion.asInt() + ".1.1.1+37",
- 0, "" + languageVersion.asInt() + ".1.1.1"
+ 0, "" + languageVersion.asInt() + ".1.1.1+37.1"
)))
}
@@ -52,22 +52,22 @@ class AdoptiumJdkToolchainResolverSpec extends AbstractToolchainResolverSpec {
@Override
def supportedRequests() {
return [
- [19, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"],
- [19, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"],
- [19, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"],
- [19, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
- [19, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [19, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [19, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [19, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [19, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [19, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
- [18, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"],
- [18, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"],
- [18, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"],
- [18, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
- [18, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
- [17, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"],
- [17, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"],
- [17, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"],
- [17, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
- [17, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"]
+ [18, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [18, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [18, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [18, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [18, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [17, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [17, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [17, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [17, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"],
+ [17, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"]
]
}
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
index 4a8b3da4777a0..27e79e637299c 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
@@ -19,6 +19,8 @@
final class SystemJvmOptions {
 static List<String> systemJvmOptions(Settings nodeSettings, final Map<String, String> sysprops) {
+ String distroType = sysprops.get("es.distribution.type");
+ boolean isHotspot = sysprops.getOrDefault("sun.management.compiler", "").contains("HotSpot");
return Stream.of(
/*
* Cache ttl in seconds for positive DNS lookups noting that this overrides the JDK security property networkaddress.cache.ttl;
@@ -65,10 +67,11 @@ static List systemJvmOptions(Settings nodeSettings, final Map e.isEmpty() == false).collect(Collectors.toList());
}
@@ -86,13 +89,25 @@ static List systemJvmOptions(Settings nodeSettings, final Map returning non-matching data streams
-area: Data streams
-type: bug
-issues:
- - 96589
diff --git a/docs/changelog/104198.yaml b/docs/changelog/104198.yaml
deleted file mode 100644
index 0b5b4680c2d88..0000000000000
--- a/docs/changelog/104198.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104198
-summary: "[Connector API] Fix bug in configuration validation parser"
-area: Application
-type: bug
-issues: []
diff --git a/docs/changelog/104281.yaml b/docs/changelog/104281.yaml
deleted file mode 100644
index 087e91d83ab3b..0000000000000
--- a/docs/changelog/104281.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104281
-summary: Data streams fix failure store delete
-area: Data streams
-type: bug
-issues: []
diff --git a/docs/changelog/104288.yaml b/docs/changelog/104288.yaml
deleted file mode 100644
index 67f54e37cf9dc..0000000000000
--- a/docs/changelog/104288.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104288
-summary: Don't throw error for remote shards that open PIT filtered out
-area: Search
-type: bug
-issues:
- - 102596
diff --git a/docs/changelog/104289.yaml b/docs/changelog/104289.yaml
deleted file mode 100644
index 9df8f8ecd4add..0000000000000
--- a/docs/changelog/104289.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104289
-summary: Better handling of async processor failures
-area: Ingest Node
-type: bug
-issues:
- - 101921
diff --git a/docs/changelog/104314.yaml b/docs/changelog/104314.yaml
deleted file mode 100644
index a17e810a2c023..0000000000000
--- a/docs/changelog/104314.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104314
-summary: "[LTR] `FieldValueExtrator` - Checking if fetched values is empty"
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/104418.yaml b/docs/changelog/104418.yaml
deleted file mode 100644
index d27b66cebea87..0000000000000
--- a/docs/changelog/104418.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104418
-summary: Fix `routing_path` when template has multiple `path_match` and multi-fields
-area: TSDB
-type: bug
-issues:
- - 104400
diff --git a/docs/changelog/104440.yaml b/docs/changelog/104440.yaml
new file mode 100644
index 0000000000000..4242b7786f05f
--- /dev/null
+++ b/docs/changelog/104440.yaml
@@ -0,0 +1,6 @@
+pr: 104440
+summary: Fix write index resolution when an alias is pointing to a TSDS
+area: Data streams
+type: bug
+issues:
+ - 104189
diff --git a/docs/changelog/104483.yaml b/docs/changelog/104483.yaml
new file mode 100644
index 0000000000000..99917b4e8e017
--- /dev/null
+++ b/docs/changelog/104483.yaml
@@ -0,0 +1,5 @@
+pr: 104483
+summary: Make `task_type` optional in `_inference` APIs
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/104523.yaml b/docs/changelog/104523.yaml
deleted file mode 100644
index d9e7d207dc23a..0000000000000
--- a/docs/changelog/104523.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104523
-summary: "ESQL: Allow grouping by null blocks"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/104574.yaml b/docs/changelog/104574.yaml
new file mode 100644
index 0000000000000..68be002142fd9
--- /dev/null
+++ b/docs/changelog/104574.yaml
@@ -0,0 +1,10 @@
+pr: 104574
+summary: Deprecate `client.type`
+area: Infra/Core
+type: deprecation
+issues: []
+deprecation:
+ title: Deprecate `client.type`
+ area: Cluster and node setting
+ details: The node setting `client.type` has been ignored since the node client was removed in 8.0. The setting is now deprecated and will be removed in a future release.
+ impact: Remove the `client.type` setting from `elasticsearch.yml`
diff --git a/docs/changelog/104585.yaml b/docs/changelog/104585.yaml
deleted file mode 100644
index 8c2b20fe54d0c..0000000000000
--- a/docs/changelog/104585.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104585
-summary: Ingest correctly handle upsert operations and drop processors together
-area: Ingest Node
-type: bug
-issues:
- - 36746
diff --git a/docs/changelog/104586.yaml b/docs/changelog/104586.yaml
deleted file mode 100644
index db1d01c22eff6..0000000000000
--- a/docs/changelog/104586.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104586
-summary: Reduce the number of Evals `ReplaceMissingFieldWithNull` creates
-area: ES|QL
-type: bug
-issues:
- - 104583
diff --git a/docs/changelog/104591.yaml b/docs/changelog/104591.yaml
deleted file mode 100644
index 0bd054385753f..0000000000000
--- a/docs/changelog/104591.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104591
-summary: Avoid execute ESQL planning on refresh thread
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/104600.yaml b/docs/changelog/104600.yaml
deleted file mode 100644
index 5337116ba37bc..0000000000000
--- a/docs/changelog/104600.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104600
-summary: "[Profiling] Query in parallel on content nodes"
-area: Application
-type: bug
-issues: []
diff --git a/docs/changelog/104606.yaml b/docs/changelog/104606.yaml
deleted file mode 100644
index f419c21e0a17d..0000000000000
--- a/docs/changelog/104606.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104606
-summary: Fix bug when `latest` transform is used together with `from` parameter
-area: Transform
-type: bug
-issues:
- - 104543
diff --git a/docs/changelog/104614.yaml b/docs/changelog/104614.yaml
new file mode 100644
index 0000000000000..9b2c25a643825
--- /dev/null
+++ b/docs/changelog/104614.yaml
@@ -0,0 +1,6 @@
+pr: 104614
+summary: Extend `repository_integrity` health indicator for unknown and invalid repos
+area: Health
+type: enhancement
+issues:
+ - 103784
diff --git a/docs/changelog/104636.yaml b/docs/changelog/104636.yaml
new file mode 100644
index 0000000000000..d74682f2eba18
--- /dev/null
+++ b/docs/changelog/104636.yaml
@@ -0,0 +1,5 @@
+pr: 104636
+summary: Modifying request builders
+area: Ingest Node
+type: enhancement
+issues: []
diff --git a/docs/changelog/104722.yaml b/docs/changelog/104722.yaml
deleted file mode 100644
index ed9f2d41ff908..0000000000000
--- a/docs/changelog/104722.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104722
-summary: Avoid possible datafeed infinite loop with filtering aggregations
-area: Machine Learning
-type: bug
-issues:
- - 104699
diff --git a/docs/changelog/104802.yaml b/docs/changelog/104802.yaml
deleted file mode 100644
index d535318043ca2..0000000000000
--- a/docs/changelog/104802.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104802
-summary: "[Connectors API] Fix bug when triggering a sync job via API"
-area: Application
-type: bug
-issues: []
diff --git a/docs/changelog/104808.yaml b/docs/changelog/104808.yaml
deleted file mode 100644
index 7682db085c7a9..0000000000000
--- a/docs/changelog/104808.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104808
-summary: Fix lost headers with chunked responses
-area: Network
-type: bug
-issues: []
diff --git a/docs/changelog/104832.yaml b/docs/changelog/104832.yaml
deleted file mode 100644
index 89f837b1c3475..0000000000000
--- a/docs/changelog/104832.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104832
-summary: Limit concurrent shards per node for ESQL
-area: ES|QL
-type: bug
-issues:
- - 103666
diff --git a/docs/changelog/104872.yaml b/docs/changelog/104872.yaml
new file mode 100644
index 0000000000000..ad70946be02ae
--- /dev/null
+++ b/docs/changelog/104872.yaml
@@ -0,0 +1,5 @@
+pr: 104872
+summary: Add new int8_flat and flat vector index types
+area: Vector Search
+type: enhancement
+issues: []
diff --git a/docs/changelog/104891.yaml b/docs/changelog/104891.yaml
deleted file mode 100644
index 690f2c4b11f88..0000000000000
--- a/docs/changelog/104891.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104891
-summary: "ESQL: Fix `SearchStats#count(String)` to count values not rows"
-area: ES|QL
-type: bug
-issues:
- - 104795
diff --git a/docs/changelog/104904.yaml b/docs/changelog/104904.yaml
deleted file mode 100644
index 07e22feb144ed..0000000000000
--- a/docs/changelog/104904.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104904
-summary: Improve `CANNOT_REBALANCE_CAN_ALLOCATE` explanation
-area: Allocation
-type: bug
-issues: []
diff --git a/docs/changelog/104905.yaml b/docs/changelog/104905.yaml
new file mode 100644
index 0000000000000..80e06dc3b0cf5
--- /dev/null
+++ b/docs/changelog/104905.yaml
@@ -0,0 +1,6 @@
+pr: 104905
+summary: "Execute lazy rollover with an internal dedicated user #104732"
+area: Data streams
+type: bug
+issues:
+ - 104732
diff --git a/docs/changelog/104949.yaml b/docs/changelog/104949.yaml
new file mode 100644
index 0000000000000..c2682fc911f1d
--- /dev/null
+++ b/docs/changelog/104949.yaml
@@ -0,0 +1,5 @@
+pr: 104949
+summary: Add text_embedding inference service with multilingual-e5 and custom eland models
+area: Machine Learning
+type: enhancement
+issues: [ ]
diff --git a/docs/changelog/104958.yaml b/docs/changelog/104958.yaml
new file mode 100644
index 0000000000000..936342db03b45
--- /dev/null
+++ b/docs/changelog/104958.yaml
@@ -0,0 +1,5 @@
+pr: 104958
+summary: "ESQL: Extend STATS command to support aggregate expressions"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/105024.yaml b/docs/changelog/105024.yaml
new file mode 100644
index 0000000000000..96268b78ddf5d
--- /dev/null
+++ b/docs/changelog/105024.yaml
@@ -0,0 +1,6 @@
+pr: 105024
+summary: "[Connectors API] Fix bug with crawler configuration parsing and `sync_now`\
+ \ flag"
+area: Application
+type: bug
+issues: []
diff --git a/docs/changelog/105044.yaml b/docs/changelog/105044.yaml
new file mode 100644
index 0000000000000..5a9a11f928f98
--- /dev/null
+++ b/docs/changelog/105044.yaml
@@ -0,0 +1,5 @@
+pr: 105044
+summary: Expose `OperationPurpose` via `CustomQueryParameter` to s3 logs
+area: Snapshot/Restore
+type: enhancement
+issues: []
diff --git a/docs/changelog/105048.yaml b/docs/changelog/105048.yaml
new file mode 100644
index 0000000000000..d865f447a0a93
--- /dev/null
+++ b/docs/changelog/105048.yaml
@@ -0,0 +1,6 @@
+pr: 105048
+summary: "ES|QL: Fix exception handling on `date_parse` with wrong date pattern"
+area: ES|QL
+type: bug
+issues:
+ - 104124
diff --git a/docs/changelog/105055.yaml b/docs/changelog/105055.yaml
new file mode 100644
index 0000000000000..0db70a6b9e558
--- /dev/null
+++ b/docs/changelog/105055.yaml
@@ -0,0 +1,5 @@
+pr: 105055
+summary: "Do not enable APM agent 'instrument', it's not required for manual tracing"
+area: Infra/Core
+type: bug
+issues: []
diff --git a/docs/changelog/105061.yaml b/docs/changelog/105061.yaml
new file mode 100644
index 0000000000000..ae8a36183e0e7
--- /dev/null
+++ b/docs/changelog/105061.yaml
@@ -0,0 +1,6 @@
+pr: 105061
+summary: "ESQL: Push CIDR_MATCH to Lucene if possible"
+area: ES|QL
+type: bug
+issues:
+ - 105042
diff --git a/docs/changelog/105062.yaml b/docs/changelog/105062.yaml
new file mode 100644
index 0000000000000..928786f62381a
--- /dev/null
+++ b/docs/changelog/105062.yaml
@@ -0,0 +1,5 @@
+pr: 105062
+summary: Nest pass-through objects within objects
+area: TSDB
+type: enhancement
+issues: []
diff --git a/docs/changelog/105064.yaml b/docs/changelog/105064.yaml
new file mode 100644
index 0000000000000..81c62b3148f1c
--- /dev/null
+++ b/docs/changelog/105064.yaml
@@ -0,0 +1,17 @@
+pr: 105064
+summary: "ES|QL: remove PROJECT keyword from the grammar"
+area: ES|QL
+type: breaking
+issues: []
+breaking:
+ title: "ES|QL: remove PROJECT keyword from the grammar"
+ area: REST API
+ details: "Removes the PROJECT keyword (an alias for KEEP) from ES|QL grammar"
+ impact: "Before this change, users could use PROJECT as an alias for KEEP in ESQL queries,\
+ \ (eg. 'FROM idx | PROJECT name, surname')\
+ \ the parser replaced PROJECT with KEEP, emitted a warning:\
+ \ 'PROJECT command is no longer supported, please use KEEP instead'\
+ \ and the query was executed normally.\
+ \ With this change, PROJECT command is no longer recognized by the query parser;\
+ \ queries using PROJECT command now return a parsing exception."
+ notable: false
diff --git a/docs/changelog/105066.yaml b/docs/changelog/105066.yaml
new file mode 100644
index 0000000000000..95757a9edaf81
--- /dev/null
+++ b/docs/changelog/105066.yaml
@@ -0,0 +1,5 @@
+pr: 105066
+summary: Fix handling of `ml.config_version` node attribute for nodes with machine learning disabled
+area: Machine Learning
+type: bug
+issues: []
diff --git a/docs/changelog/105070.yaml b/docs/changelog/105070.yaml
new file mode 100644
index 0000000000000..ff4c115e21eea
--- /dev/null
+++ b/docs/changelog/105070.yaml
@@ -0,0 +1,5 @@
+pr: 105070
+summary: Validate settings before reloading JWT shared secret
+area: Authentication
+type: bug
+issues: []
diff --git a/docs/changelog/105081.yaml b/docs/changelog/105081.yaml
new file mode 100644
index 0000000000000..efa686bd7b4a4
--- /dev/null
+++ b/docs/changelog/105081.yaml
@@ -0,0 +1,6 @@
+pr: 105081
+summary: For empty mappings use a `LocalRelation`
+area: ES|QL
+type: bug
+issues:
+ - 104809
diff --git a/docs/changelog/105089.yaml b/docs/changelog/105089.yaml
new file mode 100644
index 0000000000000..6f43c58af8a41
--- /dev/null
+++ b/docs/changelog/105089.yaml
@@ -0,0 +1,6 @@
+pr: 105089
+summary: Return results in order
+area: Transform
+type: bug
+issues:
+ - 104847
diff --git a/docs/changelog/105096.yaml b/docs/changelog/105096.yaml
new file mode 100644
index 0000000000000..bfc72a6277bb1
--- /dev/null
+++ b/docs/changelog/105096.yaml
@@ -0,0 +1,5 @@
+pr: 105096
+summary: Harden index mapping parameter check in enrich runner
+area: Ingest Node
+type: bug
+issues: []
diff --git a/docs/changelog/105131.yaml b/docs/changelog/105131.yaml
new file mode 100644
index 0000000000000..36993527da583
--- /dev/null
+++ b/docs/changelog/105131.yaml
@@ -0,0 +1,5 @@
+pr: 105131
+summary: "[Connector API] Support filtering by name, index name in list action"
+area: Application
+type: enhancement
+issues: []
diff --git a/docs/changelog/105153.yaml b/docs/changelog/105153.yaml
new file mode 100644
index 0000000000000..6c6b1f995df4b
--- /dev/null
+++ b/docs/changelog/105153.yaml
@@ -0,0 +1,6 @@
+pr: 105153
+summary: Field-caps should read fields from up-to-dated shards
+area: "Search"
+type: bug
+issues:
+ - 104809
diff --git a/docs/changelog/105164.yaml b/docs/changelog/105164.yaml
new file mode 100644
index 0000000000000..7affb0911bc6d
--- /dev/null
+++ b/docs/changelog/105164.yaml
@@ -0,0 +1,6 @@
+pr: 105164
+summary: Remove duplicate checkpoint audits
+area: Transform
+type: bug
+issues:
+ - 105106
diff --git a/docs/changelog/105178.yaml b/docs/changelog/105178.yaml
new file mode 100644
index 0000000000000..e8fc9cfd6898f
--- /dev/null
+++ b/docs/changelog/105178.yaml
@@ -0,0 +1,5 @@
+pr: 105178
+summary: "[Connector API] Support filtering connectors by service type and a query"
+area: Application
+type: enhancement
+issues: []
diff --git a/docs/changelog/105183.yaml b/docs/changelog/105183.yaml
new file mode 100644
index 0000000000000..04ec159cf02d0
--- /dev/null
+++ b/docs/changelog/105183.yaml
@@ -0,0 +1,7 @@
+pr: 105183
+summary: Fix handling surrogate pairs in the XLM Roberta tokenizer
+area: Machine Learning
+type: bug
+issues:
+ - 104626
+ - 104981
diff --git a/docs/changelog/105192.yaml b/docs/changelog/105192.yaml
new file mode 100644
index 0000000000000..b15d58ef40fe7
--- /dev/null
+++ b/docs/changelog/105192.yaml
@@ -0,0 +1,6 @@
+pr: 105192
+summary: Allow transforms to use PIT with remote clusters again
+area: Transform
+type: enhancement
+issues:
+ - 104518
diff --git a/docs/changelog/105213.yaml b/docs/changelog/105213.yaml
new file mode 100644
index 0000000000000..40595a8166ef2
--- /dev/null
+++ b/docs/changelog/105213.yaml
@@ -0,0 +1,5 @@
+pr: 105213
+summary: Inference service should reject tasks during shutdown
+area: Machine Learning
+type: bug
+issues: []
diff --git a/docs/changelog/105228.yaml b/docs/changelog/105228.yaml
new file mode 100644
index 0000000000000..7526a3caa81d9
--- /dev/null
+++ b/docs/changelog/105228.yaml
@@ -0,0 +1,6 @@
+pr: 105228
+summary: Downsampling better handle if source index isn't allocated and fix bug in
+ retrieving last processed tsid
+area: Downsampling
+type: bug
+issues: []
diff --git a/docs/changelog/99747.yaml b/docs/changelog/99747.yaml
new file mode 100644
index 0000000000000..94aefbf25d8e5
--- /dev/null
+++ b/docs/changelog/99747.yaml
@@ -0,0 +1,5 @@
+pr: 99747
+summary: TSDB dimensions encoding
+area: TSDB
+type: enhancement
+issues: []
diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc
index e1e27be12a36f..d7fa25a5a8d4f 100644
--- a/docs/reference/esql/esql-query-api.asciidoc
+++ b/docs/reference/esql/esql-query-api.asciidoc
@@ -67,6 +67,10 @@ precedence.
`false`. The API only supports this parameter for CBOR, JSON, SMILE, and YAML
responses. See <>.
+`locale`::
+(Optional, string) Returns results (especially dates) formatted per the conventions of the locale.
+For syntax, refer to <<esql-locale-param>>.
+
`params`::
(Optional, array) Values for parameters in the `query`. For syntax, refer to
<<esql-rest-params>>.
diff --git a/docs/reference/esql/esql-rest.asciidoc b/docs/reference/esql/esql-rest.asciidoc
index d66ceb2eb4f1e..fc06cfea904af 100644
--- a/docs/reference/esql/esql-rest.asciidoc
+++ b/docs/reference/esql/esql-rest.asciidoc
@@ -204,6 +204,33 @@ Which returns:
}
----
+[discrete]
+[[esql-locale-param]]
+==== Returning localized results
+
+Use the `locale` parameter in the request body to return results (especially dates) formatted per the conventions of the locale.
+If `locale` is not specified, it defaults to `en-US` (English).
+Refer to https://www.oracle.com/java/technologies/javase/jdk17-suported-locales.html[JDK Supported Locales].
+
+Syntax: the `locale` parameter accepts language tags in the (case-insensitive) format `xy` and `xy-XY`.
+
+For example, to return a month name in French:
+
+[source,console]
+----
+POST /_query
+{
+ "locale": "fr-FR",
+ "query": """
+ ROW birth_date_string = "2023-01-15T00:00:00.000Z"
+ | EVAL birth_date = date_parse(birth_date_string)
+ | EVAL month_of_birth = DATE_FORMAT("MMMM",birth_date)
+ | LIMIT 5
+ """
+}
+----
+// TEST[setup:library]
+
[discrete]
[[esql-rest-params]]
==== Passing parameters to a query
diff --git a/docs/reference/esql/functions/log.asciidoc b/docs/reference/esql/functions/log.asciidoc
new file mode 100644
index 0000000000000..79ea72898bc2f
--- /dev/null
+++ b/docs/reference/esql/functions/log.asciidoc
@@ -0,0 +1,48 @@
+[discrete]
+[[esql-log]]
+=== `LOG`
+
+*Syntax*
+
+[source,esql]
+----
+LOG([base,] value)
+----
+
+*Parameters*
+
+`base`::
+Numeric expression. If `null`, the function returns `null`. The base is an optional input parameter. If a base is not provided, this function returns the natural logarithm (base e) of a value.
+
+`value`::
+Numeric expression. If `null`, the function returns `null`.
+
+*Description*
+
+Returns the logarithm of a value to a base. The input can be any numeric value; the return value is always a double.
+
+Logs of zero, negative numbers, and infinities, as well as a base of one, return `null` and a warning.
+
+*Supported types*
+
+include::types/log.asciidoc[]
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/math.csv-spec[tag=log]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/math.csv-spec[tag=log-result]
+|===
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/math.csv-spec[tag=logUnary]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/math.csv-spec[tag=logUnary-result]
+|===
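As a rough, standalone illustration of the two calling forms (not taken from the spec files referenced above):

[source,esql]
----
ROW value = 8.0
| EVAL base_two = LOG(2, value), natural = LOG(value)
// base_two is 3.0; natural is the natural logarithm of 8, roughly 2.079
----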
diff --git a/docs/reference/esql/functions/math-functions.asciidoc b/docs/reference/esql/functions/math-functions.asciidoc
index 21131ae9074d7..0ddf7412db2a1 100644
--- a/docs/reference/esql/functions/math-functions.asciidoc
+++ b/docs/reference/esql/functions/math-functions.asciidoc
@@ -18,6 +18,7 @@
* <>
* <>
* <>
+* <<esql-log>>
* <>
* <>
* <>
@@ -40,6 +41,7 @@ include::cos.asciidoc[]
include::cosh.asciidoc[]
include::e.asciidoc[]
include::floor.asciidoc[]
+include::log.asciidoc[]
include::log10.asciidoc[]
include::pi.asciidoc[]
include::pow.asciidoc[]
diff --git a/docs/reference/esql/functions/signature/log.svg b/docs/reference/esql/functions/signature/log.svg
new file mode 100644
index 0000000000000..39a9a7e8dc52e
--- /dev/null
+++ b/docs/reference/esql/functions/signature/log.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/types/log.asciidoc b/docs/reference/esql/functions/types/log.asciidoc
new file mode 100644
index 0000000000000..d72ea848c349f
--- /dev/null
+++ b/docs/reference/esql/functions/types/log.asciidoc
@@ -0,0 +1,20 @@
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+base | value | result
+double | double | double
+double | integer | double
+double | long | double
+double | unsigned_long | double
+integer | double | double
+integer | integer | double
+integer | long | double
+integer | unsigned_long | double
+long | double | double
+long | integer | double
+long | long | double
+long | unsigned_long | double
+unsigned_long | double | double
+unsigned_long | integer | double
+unsigned_long | long | double
+unsigned_long | unsigned_long | double
+|===
diff --git a/docs/reference/esql/processing-commands/drop.asciidoc b/docs/reference/esql/processing-commands/drop.asciidoc
index 4787c5f137314..8f03141d5e05a 100644
--- a/docs/reference/esql/processing-commands/drop.asciidoc
+++ b/docs/reference/esql/processing-commands/drop.asciidoc
@@ -22,7 +22,7 @@ The `DROP` processing command removes one or more columns.
[source,esql]
----
-include::{esql-specs}/docs.csv-spec[tag=dropheight]
+include::{esql-specs}/drop.csv-spec[tag=height]
----
Rather than specify each column by name, you can use wildcards to drop all
@@ -30,5 +30,5 @@ columns with a name that matches a pattern:
[source,esql]
----
-include::{esql-specs}/docs.csv-spec[tag=dropheightwithwildcard]
+include::{esql-specs}/drop.csv-spec[tag=heightWithWildcard]
----
diff --git a/docs/reference/esql/processing-commands/keep.asciidoc b/docs/reference/esql/processing-commands/keep.asciidoc
index 7515583b1bfd1..57f32a68aec4c 100644
--- a/docs/reference/esql/processing-commands/keep.asciidoc
+++ b/docs/reference/esql/processing-commands/keep.asciidoc
@@ -10,6 +10,7 @@ KEEP columns
----
*Parameters*
+
`columns`::
A comma-separated list of columns to keep. Supports wildcards.
@@ -18,6 +19,17 @@ A comma-separated list of columns to keep. Supports wildcards.
The `KEEP` processing command enables you to specify what columns are returned
and the order in which they are returned.
+Precedence rules are applied when a field name matches multiple expressions.
+Fields are added in the order they appear. If one field matches multiple expressions, the following precedence rules apply (from highest to lowest priority):
+
+1. Complete field name (no wildcards)
+2. Partial wildcard expressions (for example: `fieldNam*`)
+3. Wildcard only (`*`)
+
+If a field matches two expressions with the same precedence, the right-most expression wins.
+
+Refer to the examples for illustrations of these precedence rules.
+
*Examples*
The columns are returned in the specified order:
@@ -38,12 +50,58 @@ columns with a name that matches a pattern:
----
include::{esql-specs}/docs.csv-spec[tag=keepWildcard]
----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=keep-wildcard-result]
+|===
The asterisk wildcard (`*`) by itself translates to all columns that do not
-match the other arguments. This query will first return all columns with a name
+match the other arguments.
+
+This query will first return all columns with a name
that starts with `h`, followed by all other columns:
[source,esql]
----
include::{esql-specs}/docs.csv-spec[tag=keepDoubleWildcard]
----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=keep-double-wildcard-result]
+|===
+
+The following examples show how precedence rules work when a field name matches multiple expressions.
+
+Complete field name has precedence over wildcard expressions:
+
+[source,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=keepCompleteName]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=keep-complete-name-result]
+|===
+
+Wildcard expressions have the same priority, but the last one wins (despite being less specific):
+
+[source,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=keepWildcardPrecedence]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=keep-wildcard-precedence-result]
+|===
+
+A simple wildcard expression `*` has the lowest precedence.
+Output order is determined by the other arguments:
+
+[source,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=keepWildcardLowest]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=keep-wildcard-lowest-result]
+|===
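As a rough, standalone sketch of the lowest-precedence case (the index and field names are hypothetical, not from the spec files above): with fields `first_name`, `last_name`, and `height`, the complete field name outranks the bare `*` wildcard, so `first_name` is returned first and the remaining columns follow.

[source,esql]
----
FROM employees
| KEEP first_name, *
// first_name comes first; all other columns follow in their default order
----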
diff --git a/docs/reference/esql/processing-commands/limit.asciidoc b/docs/reference/esql/processing-commands/limit.asciidoc
index 5f659fc493a75..4ccf3024a4c1e 100644
--- a/docs/reference/esql/processing-commands/limit.asciidoc
+++ b/docs/reference/esql/processing-commands/limit.asciidoc
@@ -43,5 +43,5 @@ settings:
[source,esql]
----
-include::{esql-specs}/docs.csv-spec[tag=limit]
+include::{esql-specs}/limit.csv-spec[tag=basic]
----
diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc
index 1f0a79258bd37..25d39a17af306 100644
--- a/docs/reference/indices/flush.asciidoc
+++ b/docs/reference/indices/flush.asciidoc
@@ -81,7 +81,7 @@ Defaults to `open`.
If `true`,
the request forces a flush
even if there are no changes to commit to the index.
-Defaults to `true`.
+Defaults to `false`.
You can use this parameter
to increment the generation number of the transaction log.
diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc
index 692a96212f5ca..850b4ef1b10b0 100644
--- a/docs/reference/inference/delete-inference.asciidoc
+++ b/docs/reference/inference/delete-inference.asciidoc
@@ -6,9 +6,9 @@ experimental[]
Deletes an {infer} model deployment.
-IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER,
-OpenAI, or Hugging Face, in your cluster. This is not the same feature that you
-can use on an ML node with custom {ml} models. If you want to train and use your
+IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER,
+OpenAI, or Hugging Face, in your cluster. This is not the same feature that you
+can use on an ML node with custom {ml} models. If you want to train and use your
own model, use the <>.
@@ -16,6 +16,7 @@ own model, use the <>.
[[delete-inference-api-request]]
==== {api-request-title}
+`DELETE /_inference/<inference_id>`
`DELETE /_inference/<task_type>/<inference_id>`
[discrete]
@@ -34,7 +35,7 @@ own model, use the <>.
The unique identifier of the {infer} model to delete.
<task_type>::
-(Required, string)
+(Optional, string)
The type of {infer} task that the model performs.
@@ -42,7 +43,7 @@ The type of {infer} task that the model performs.
[[delete-inference-api-example]]
==== {api-examples-title}
-The following API call deletes the `my-elser-model` {infer} model that can
+The following API call deletes the `my-elser-model` {infer} model that can
perform `sparse_embedding` tasks.
@@ -61,4 +62,4 @@ The API returns the following response:
"acknowledged": true
}
------------------------------------------------------------
-// NOTCONSOLE
\ No newline at end of file
+// NOTCONSOLE
diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc
index 45f4cb67e7674..176909bc5458f 100644
--- a/docs/reference/inference/get-inference.asciidoc
+++ b/docs/reference/inference/get-inference.asciidoc
@@ -6,9 +6,9 @@ experimental[]
Retrieves {infer} model information.
-IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER,
-OpenAI, or Hugging Face, in your cluster. This is not the same feature that you
-can use on an ML node with custom {ml} models. If you want to train and use your
+IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER,
+OpenAI, or Hugging Face, in your cluster. This is not the same feature that you
+can use on an ML node with custom {ml} models. If you want to train and use your
own model, use the <>.
@@ -18,6 +18,8 @@ own model, use the <>.
`GET /_inference/_all`
+`GET /_inference/<inference_id>`
+
`GET /_inference/<task_type>/_all`
`GET /_inference/<task_type>/<inference_id>`
diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc
index 9ef633160f162..4fb6ea5a4fb6d 100644
--- a/docs/reference/inference/post-inference.asciidoc
+++ b/docs/reference/inference/post-inference.asciidoc
@@ -6,9 +6,9 @@ experimental[]
Performs an inference task on an input text by using an {infer} model.
-IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER,
-OpenAI, or Hugging Face, in your cluster. This is not the same feature that you
-can use on an ML node with custom {ml} models. If you want to train and use your
+IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER,
+OpenAI, or Hugging Face, in your cluster. This is not the same feature that you
+can use on an ML node with custom {ml} models. If you want to train and use your
own model, use the <>.
@@ -16,6 +16,7 @@ own model, use the <>.
[[post-inference-api-request]]
==== {api-request-title}
+`POST /_inference/`
`POST /_inference//`
@@ -46,7 +47,7 @@ The unique identifier of the {infer} model.
``::
-(Required, string)
+(Optional, string)
The type of {infer} task that the model performs.
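A minimal sketch of the shorter request form added above, assuming the request body takes an `input` field as in the task-typed form (the model ID and input text are hypothetical):

[source,console]
--------------------------------------------------
POST /_inference/my-elser-model
{
  "input": "What is Elasticsearch?"
}
--------------------------------------------------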
diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc
index a2ab44a173a62..d600bc5566ace 100644
--- a/docs/reference/mapping/types/dense-vector.asciidoc
+++ b/docs/reference/mapping/types/dense-vector.asciidoc
@@ -238,21 +238,31 @@ expense of slower indexing speed.
====
`type`:::
(Required, string)
-The type of kNN algorithm to use. Can be either `hnsw` or `int8_hnsw`.
-
+The type of kNN algorithm to use. Can be any of:
++
+--
+* `hnsw` - The default storage type. This utilizes the https://arxiv.org/abs/1603.09320[HNSW algorithm] for scalable
+ approximate kNN search. This supports all `element_type` values.
+* `int8_hnsw` - This utilizes the https://arxiv.org/abs/1603.09320[HNSW algorithm] in addition to automatic scalar
+quantization for scalable approximate kNN search with `element_type` of `float`. This can reduce the memory footprint
+by 4x at the cost of some accuracy. See <>.
+* `flat` - This utilizes a brute-force search algorithm for exact kNN search. This supports all `element_type` values.
+* `int8_flat` - This utilizes a brute-force search algorithm in addition to automatic scalar quantization. Only supports
+`element_type` of `float`.
+--
`m`:::
(Optional, integer)
The number of neighbors each node will be connected to in the HNSW graph.
-Defaults to `16`.
+Defaults to `16`. Only applicable to `hnsw` and `int8_hnsw` index types.
`ef_construction`:::
(Optional, integer)
The number of candidates to track while assembling the list of nearest
-neighbors for each new node. Defaults to `100`.
+neighbors for each new node. Defaults to `100`. Only applicable to `hnsw` and `int8_hnsw` index types.
`confidence_interval`:::
(Optional, float)
-Only applicable to `int8_hnsw` index types. The confidence interval to use when quantizing the vectors,
+Only applicable to `int8_hnsw` and `int8_flat` index types. The confidence interval to use when quantizing the vectors,
can be any value between and including `0.90` and `1.0`. This value restricts the values used when calculating
the quantization thresholds. For example, a value of `0.95` will only use the middle 95% of the values when
calculating the quantization thresholds (e.g. the highest and lowest 2.5% of values will be ignored).
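A minimal mapping sketch (index and field names are hypothetical) that selects the quantized `int8_hnsw` type together with the tuning parameters described above:

[source,console]
--------------------------------------------------
PUT my-vector-index
{
  "mappings": {
    "properties": {
      "my_vector": {
        "type": "dense_vector",
        "dims": 3,
        "index": true,
        "similarity": "cosine",
        "index_options": {
          "type": "int8_hnsw",
          "m": 16,
          "ef_construction": 100
        }
      }
    }
  }
}
--------------------------------------------------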
diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc
index 5a7aa43155c66..1e425c77d1264 100644
--- a/docs/reference/modules/cluster/shards_allocation.asciidoc
+++ b/docs/reference/modules/cluster/shards_allocation.asciidoc
@@ -22,37 +22,55 @@ one of the active allocation ids in the cluster state.
--
+[[cluster-routing-allocation-same-shard-host]]
+`cluster.routing.allocation.same_shard.host`::
+ (<>)
+ If `true`, forbids multiple copies of a shard from being allocated to
+  distinct nodes on the same host, i.e. nodes which have the same network
+ address. Defaults to `false`, meaning that copies of a shard may
+ sometimes be allocated to nodes on the same host. This setting is only
+ relevant if you run multiple nodes on each host.
+
`cluster.routing.allocation.node_concurrent_incoming_recoveries`::
(<>)
- How many concurrent incoming shard recoveries are allowed to happen on a node. Incoming recoveries are the recoveries
- where the target shard (most likely the replica unless a shard is relocating) is allocated on the node. Defaults to `2`.
+ How many concurrent incoming shard recoveries are allowed to happen on a
+ node. Incoming recoveries are the recoveries where the target shard (most
+ likely the replica unless a shard is relocating) is allocated on the node.
+ Defaults to `2`. Increasing this setting may cause shard movements to have
+ a performance impact on other activity in your cluster, but may not make
+ shard movements complete noticeably sooner. We do not recommend adjusting
+ this setting from its default of `2`.
`cluster.routing.allocation.node_concurrent_outgoing_recoveries`::
(<>)
- How many concurrent outgoing shard recoveries are allowed to happen on a node. Outgoing recoveries are the recoveries
- where the source shard (most likely the primary unless a shard is relocating) is allocated on the node. Defaults to `2`.
+ How many concurrent outgoing shard recoveries are allowed to happen on a
+ node. Outgoing recoveries are the recoveries where the source shard (most
+ likely the primary unless a shard is relocating) is allocated on the node.
+ Defaults to `2`. Increasing this setting may cause shard movements to have
+ a performance impact on other activity in your cluster, but may not make
+ shard movements complete noticeably sooner. We do not recommend adjusting
+ this setting from its default of `2`.
`cluster.routing.allocation.node_concurrent_recoveries`::
(<>)
- A shortcut to set both `cluster.routing.allocation.node_concurrent_incoming_recoveries` and
- `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. Defaults to 2.
-
+ A shortcut to set both
+ `cluster.routing.allocation.node_concurrent_incoming_recoveries` and
+ `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. The
+ value of this setting takes effect only when the more specific setting is
+ not configured. Defaults to `2`. Increasing this setting may cause shard
+ movements to have a performance impact on other activity in your cluster,
+ but may not make shard movements complete noticeably sooner. We do not
+ recommend adjusting this setting from its default of `2`.
`cluster.routing.allocation.node_initial_primaries_recoveries`::
- (<>)
- While the recovery of replicas happens over the network, the recovery of
- an unassigned primary after node restart uses data from the local disk.
- These should be fast so more initial primary recoveries can happen in
- parallel on the same node. Defaults to `4`.
-
-[[cluster-routing-allocation-same-shard-host]]
-`cluster.routing.allocation.same_shard.host`::
- (<>)
- If `true`, forbids multiple copies of a shard from being allocated to
- distinct nodes on the same host, i.e. which have the same network
- address. Defaults to `false`, meaning that copies of a shard may
- sometimes be allocated to nodes on the same host. This setting is only
- relevant if you run multiple nodes on each host.
+ (<>)
+ While the recovery of replicas happens over the network, the recovery of
+ an unassigned primary after node restart uses data from the local disk.
+ These should be fast so more initial primary recoveries can happen in
+ parallel on each node. Defaults to `4`. Increasing this setting may cause
+ shard recoveries to have a performance impact on other activity in your
+ cluster, but may not make shard recoveries complete noticeably sooner. We
+ do not recommend adjusting this setting from its default of `4`.
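These are dynamic cluster settings, so they can be changed at runtime with the cluster update settings API. A sketch that enables the same-shard-host protection described above (only relevant when you run several nodes per host):

[source,console]
--------------------------------------------------
PUT _cluster/settings
{
  "persistent": {
    "cluster.routing.allocation.same_shard.host": true
  }
}
--------------------------------------------------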
[[shards-rebalancing-settings]]
==== Shard rebalancing settings
@@ -73,38 +91,44 @@ balancer works independently within each tier.
You can use the following settings to control the rebalancing of shards across
the cluster:
-`cluster.routing.rebalance.enable`::
+`cluster.routing.allocation.allow_rebalance`::
+
--
(<>)
-Enable or disable rebalancing for specific kinds of shards:
+Specify when shard rebalancing is allowed:
-* `all` - (default) Allows shard balancing for all kinds of shards.
-* `primaries` - Allows shard balancing only for primary shards.
-* `replicas` - Allows shard balancing only for replica shards.
-* `none` - No shard balancing of any kind are allowed for any indices.
+
+* `always` - Always allow rebalancing.
+* `indices_primaries_active` - Only when all primaries in the cluster are allocated.
+* `indices_all_active` - (default) Only when all shards (primaries and replicas) in the cluster are allocated.
--
-`cluster.routing.allocation.allow_rebalance`::
+`cluster.routing.rebalance.enable`::
+
--
(<>)
-Specify when shard rebalancing is allowed:
+Enable or disable rebalancing for specific kinds of shards:
+* `all` - (default) Allows shard balancing for all kinds of shards.
+* `primaries` - Allows shard balancing only for primary shards.
+* `replicas` - Allows shard balancing only for replica shards.
+* `none` - No shard balancing of any kind is allowed for any indices.
-* `always` - Always allow rebalancing.
-* `indices_primaries_active` - Only when all primaries in the cluster are allocated.
-* `indices_all_active` - (default) Only when all shards (primaries and replicas) in the cluster are allocated.
+Rebalancing is important to ensure the cluster returns to a healthy and fully
+resilient state after a disruption. If you adjust this setting, remember to set
+it back to `all` as soon as possible.
--
`cluster.routing.allocation.cluster_concurrent_rebalance`::
(<>)
Defines the number of concurrent shard rebalances are allowed across the whole
cluster. Defaults to `2`. Note that this setting only controls the number of
-concurrent shard relocations due to imbalances in the cluster. This setting does
-not limit shard relocations due to
+concurrent shard relocations due to imbalances in the cluster. This setting
+does not limit shard relocations due to
<> or
-<>.
+<>. Increasing this setting may cause the
+cluster to use additional resources moving shards between nodes, so we
+generally do not recommend adjusting this setting from its default of `2`.
`cluster.routing.allocation.type`::
+
@@ -149,6 +173,12 @@ data stream have an estimated write load of zero.
The following settings control how {es} combines these values into an overall
measure of each node's weight.
+`cluster.routing.allocation.balance.threshold`::
+(float, <>)
+The minimum improvement in weight which triggers a rebalancing shard movement.
+Defaults to `1.0f`. Raising this value will cause {es} to stop rebalancing
+shards sooner, leaving the cluster in a more unbalanced state.
+
`cluster.routing.allocation.balance.shard`::
(float, <>)
Defines the weight factor for the total number of shards allocated to each node.
@@ -177,19 +207,25 @@ estimated number of indexing threads needed by the shard. Defaults to `10.0f`.
Raising this value increases the tendency of {es} to equalize the total write
load across nodes ahead of the other balancing variables.
-`cluster.routing.allocation.balance.threshold`::
-(float, <>)
-The minimum improvement in weight which triggers a rebalancing shard movement.
-Defaults to `1.0f`. Raising this value will cause {es} to stop rebalancing
-shards sooner, leaving the cluster in a more unbalanced state.
-
[NOTE]
====
-* It is not recommended to adjust the values of the heuristics settings. The
-default values are generally good, and although different values may improve
-the current balance, it is possible that they create problems in the future
-if the cluster or workload changes.
+* If you have a large cluster, it may be unnecessary to keep it in
+a perfectly balanced state at all times. It is less resource-intensive for the
+cluster to operate in a somewhat unbalanced state rather than to perform all
+the shard movements needed to achieve the perfect balance. If so, increase the
+value of `cluster.routing.allocation.balance.threshold` to define the
+acceptable imbalance between nodes. For instance, if you have an average of 500
+shards per node and can accept a difference of 5% (25 typical shards) between
+nodes, set `cluster.routing.allocation.balance.threshold` to `25`.
+
+* We do not recommend adjusting the values of the heuristic weight factor
+settings. The default values work well in all reasonable clusters. Although
+different values may improve the current balance in some ways, it is possible
+that they will create unexpected problems in the future or prevent the cluster from
+gracefully handling an unexpected disruption.
+
* Regardless of the result of the balancing algorithm, rebalancing might
not be allowed due to allocation rules such as forced awareness and allocation
-filtering.
+filtering. Use the <> API to explain the current
+allocation of shards.
====
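Following the sizing example in the note above (an average of 500 shards per node and an acceptable 5% difference), raising the threshold dynamically might look like this sketch:

[source,console]
--------------------------------------------------
PUT _cluster/settings
{
  "persistent": {
    "cluster.routing.allocation.balance.threshold": 25
  }
}
--------------------------------------------------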
diff --git a/docs/reference/modules/indices/recovery.asciidoc b/docs/reference/modules/indices/recovery.asciidoc
index 02b70c69876ff..261c3d3fc3f24 100644
--- a/docs/reference/modules/indices/recovery.asciidoc
+++ b/docs/reference/modules/indices/recovery.asciidoc
@@ -38,8 +38,9 @@ This limit applies to each node separately. If multiple nodes in a cluster
perform recoveries at the same time, the cluster's total recovery traffic may
exceed this limit.
+
-If this limit is too high, ongoing recoveries may consume an excess of bandwidth
-and other resources, which can destabilize the cluster.
+If this limit is too high, ongoing recoveries may consume an excess of
+bandwidth and other resources, which can have a performance impact on your
+cluster and in extreme cases may destabilize it.
+
This is a dynamic setting, which means you can set it in each node's
`elasticsearch.yml` config file and you can update it dynamically using the
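Assuming the limit discussed here is the `indices.recovery.max_bytes_per_sec` setting (as the surrounding recovery-settings page suggests), a dynamic update could look like the following sketch; the `40mb` value is only illustrative:

[source,console]
--------------------------------------------------
PUT _cluster/settings
{
  "persistent": {
    "indices.recovery.max_bytes_per_sec": "40mb"
  }
}
--------------------------------------------------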
diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc
index 1aebf005a64e3..669402c94e9bb 100644
--- a/docs/reference/release-notes.asciidoc
+++ b/docs/reference/release-notes.asciidoc
@@ -7,6 +7,7 @@
This section summarizes the changes in each release.
* <>
+* <>
* <>
* <>
* <>
@@ -60,6 +61,7 @@ This section summarizes the changes in each release.
--
include::release-notes/8.13.0.asciidoc[]
+include::release-notes/8.12.1.asciidoc[]
include::release-notes/8.12.0.asciidoc[]
include::release-notes/8.11.4.asciidoc[]
include::release-notes/8.11.3.asciidoc[]
diff --git a/docs/reference/release-notes/8.12.1.asciidoc b/docs/reference/release-notes/8.12.1.asciidoc
new file mode 100644
index 0000000000000..9aa9a11b3bf02
--- /dev/null
+++ b/docs/reference/release-notes/8.12.1.asciidoc
@@ -0,0 +1,73 @@
+[[release-notes-8.12.1]]
+== {es} version 8.12.1
+
+Also see <>.
+
+[[bug-8.12.1]]
+[float]
+=== Bug fixes
+
+Allocation::
+* Improve `CANNOT_REBALANCE_CAN_ALLOCATE` explanation {es-pull}104904[#104904]
+
+Application::
+* [Connector API] Fix bug in configuration validation parser {es-pull}104198[#104198]
+* [Connector API] Fix bug when triggering a sync job via API {es-pull}104802[#104802]
+* [Profiling] Query in parallel on content nodes {es-pull}104600[#104600]
+
+Data streams::
+* Data streams fix failure store delete {es-pull}104281[#104281]
+* Fix _alias/ returning non-matching data streams {es-pull}104145[#104145] (issue: {es-issue}96589[#96589])
+
+Downsampling::
+* Downsampling supports `date_histogram` with tz {es-pull}103511[#103511] (issue: {es-issue}101309[#101309])
+
+ES|QL::
+* Avoid execute ESQL planning on refresh thread {es-pull}104591[#104591]
+* ESQL: Allow grouping by null blocks {es-pull}104523[#104523]
+* ESQL: Fix `SearchStats#count(String)` to count values not rows {es-pull}104891[#104891] (issue: {es-issue}104795[#104795])
+* Limit concurrent shards per node for ESQL {es-pull}104832[#104832] (issue: {es-issue}103666[#103666])
+* Reduce the number of Evals `ReplaceMissingFieldWithNull` creates {es-pull}104586[#104586] (issue: {es-issue}104583[#104583])
+
+Infra/Resiliency::
+* Limit nesting depth in Exception XContent {es-pull}103741[#103741]
+
+Ingest Node::
+* Better handling of async processor failures {es-pull}104289[#104289] (issue: {es-issue}101921[#101921])
+* Ingest correctly handle upsert operations and drop processors together {es-pull}104585[#104585] (issue: {es-issue}36746[#36746])
+
+Machine Learning::
+* Add retry logic for 500 and 503 errors for OpenAI {es-pull}103819[#103819]
+* Avoid possible datafeed infinite loop with filtering aggregations {es-pull}104722[#104722] (issue: {es-issue}104699[#104699])
+* [LTR] `FieldValueExtrator` - Checking if fetched values is empty {es-pull}104314[#104314]
+
+Network::
+* Fix lost headers with chunked responses {es-pull}104808[#104808]
+
+Search::
+* Don't throw error for remote shards that open PIT filtered out {es-pull}104288[#104288] (issue: {es-issue}102596[#102596])
+
+Snapshot/Restore::
+* Fix deleting index during snapshot finalization {es-pull}103817[#103817] (issue: {es-issue}101029[#101029])
+
+TSDB::
+* Fix `routing_path` when template has multiple `path_match` and multi-fields {es-pull}104418[#104418] (issue: {es-issue}104400[#104400])
+
+Transform::
+* Fix bug when `latest` transform is used together with `from` parameter {es-pull}104606[#104606] (issue: {es-issue}104543[#104543])
+
+[[deprecation-8.12.1]]
+[float]
+=== Deprecations
+
+Machine Learning::
+* Deprecate machine learning on Intel macOS {es-pull}104087[#104087]
+
+[[upgrade-8.12.1]]
+[float]
+=== Upgrades
+
+Search::
+* [8.12.1] Upgrade to Lucene 9.9.2 {es-pull}104761[#104761] (issue: {es-issue}104617[#104617])
+
+
diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc
index f5252ae6a884f..0452eca8fbfc9 100644
--- a/docs/reference/release-notes/highlights.asciidoc
+++ b/docs/reference/release-notes/highlights.asciidoc
@@ -27,13 +27,15 @@ Other versions:
endif::[]
-// The notable-highlights tag marks entries that
-// should be featured in the Stack Installation and Upgrade Guide:
// tag::notable-highlights[]
-// [discrete]
-// === Heading
-//
-// Description.
+
+[discrete]
+[[ga_release_of_synonyms_api]]
+=== GA Release of Synonyms API
+Removes the beta label for the Synonyms API to make it GA.
+
+{es-pull}103223[#103223]
+
// end::notable-highlights[]
diff --git a/docs/reference/rest-api/security/query-user.asciidoc b/docs/reference/rest-api/security/query-user.asciidoc
index 08ead0f389ee9..fae5b9914a05e 100644
--- a/docs/reference/rest-api/security/query-user.asciidoc
+++ b/docs/reference/rest-api/security/query-user.asciidoc
@@ -62,6 +62,13 @@ The email of the user.
`enabled`::
Specifies whether the user is enabled.
+[[security-api-query-user-query-params]]
+==== {api-query-parms-title}
+
+`with_profile_uid`::
+(Optional, boolean) Determines whether to retrieve the <> `uid`,
+if it exists, for the users. Defaults to `false`.
+
====
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=from]
@@ -218,6 +225,46 @@ A successful call returns a JSON structure for a user:
--------------------------------------------------
// NOTCONSOLE
+To retrieve the user `profile_uid` as part of the response:
+
+[source,console]
+--------------------------------------------------
+GET /_security/_query/user?with_profile_uid=true
+{
+ "query": {
+ "prefix": {
+ "roles": "other"
+ }
+ }
+}
+--------------------------------------------------
+// TEST[setup:jacknich_user]
+
+[source,console-result]
+--------------------------------------------------
+{
+ "total": 1,
+ "count": 1,
+ "users": [
+ {
+ "username": "jacknich",
+ "roles": [
+ "admin",
+ "other_role1"
+ ],
+ "full_name": "Jack Nicholson",
+ "email": "jacknich@example.com",
+ "metadata": {
+ "intelligence": 7
+ },
+ "enabled": true,
+ "profile_uid": "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0"
+ }
+ ]
+}
+--------------------------------------------------
+// NOTCONSOLE
+
Use a `bool` query to issue complex logical conditions and use
`from`, `size`, `sort` to help paginate the result:
diff --git a/docs/reference/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc
index 2ac248d5ea8e7..1077a63b00249 100644
--- a/docs/reference/settings/ml-settings.asciidoc
+++ b/docs/reference/settings/ml-settings.asciidoc
@@ -10,9 +10,12 @@
// tag::ml-settings-description-tag[]
You do not need to configure any settings to use {ml}. It is enabled by default.
-IMPORTANT: {ml-cap} uses SSE4.2 instructions, so it works only on machines whose
-CPUs {wikipedia}/SSE4#Supporting_CPUs[support] SSE4.2. If you run {es} on older
-hardware, you must disable {ml} (by setting `xpack.ml.enabled` to `false`).
+IMPORTANT: {ml-cap} uses SSE4.2 instructions on x86_64 machines, so it works only
+on x86_64 machines whose CPUs {wikipedia}/SSE4#Supporting_CPUs[support] SSE4.2.
+(This limitation does not apply to aarch64 machines.) If you run {es} on older
+x86_64 hardware, you must disable {ml} (by setting `xpack.ml.enabled` to `false`).
+In this situation you should not attempt to use {ml} functionality in your cluster
+at all.
// end::ml-settings-description-tag[]
@@ -46,7 +49,18 @@ that you use the default value for this setting on all nodes.
+
If set to `false`, the {ml} APIs are disabled on the node. For example, the node
cannot open jobs, start {dfeeds}, receive transport (internal) communication
-requests, or requests from clients (including {kib}) related to {ml} APIs.
+requests, or requests from clients (including {kib}) related to {ml} APIs. If
+`xpack.ml.enabled` is not set uniformly across all nodes in your cluster then you
+are likely to experience problems with {ml} functionality not fully working.
++
+You must not use any {ml} functionality from ingest pipelines if `xpack.ml.enabled`
+is `false` on any node. Before setting `xpack.ml.enabled` to `false` on a node,
+consider whether you really meant to just exclude `ml` from the `node.roles`.
+Excluding `ml` from the <> will stop the node from
+running {ml} jobs and NLP models, but it will still be aware that {ml} functionality
+exists. Setting `xpack.ml.enabled` to `false` should be reserved for situations
+where you cannot use {ml} functionality at all in your cluster due to hardware
+limitations as described <>.
`xpack.ml.inference_model.cache_size`::
(<>) The maximum inference cache size allowed.
diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc
index 858902bb72ef2..49501c46b8ba9 100644
--- a/docs/reference/setup/install.asciidoc
+++ b/docs/reference/setup/install.asciidoc
@@ -16,8 +16,8 @@ To set up Elasticsearch in {ecloud}, sign up for a {ess-trial}[free {ecloud} tri
If you want to install and manage {es} yourself, you can:
-* Run {es} on any Linux, MacOS, or Windows machine.
-* Run {es} in a <>.
+* Run {es} using a <>.
+* Run {es} in a <>.
* Set up and manage {es}, {kib}, {agent}, and the rest of the Elastic Stack on Kubernetes with {eck-ref}[{eck}].
TIP: To try out Elasticsearch on your own machine, we recommend using Docker and running both Elasticsearch and Kibana. For more information, see <>.
@@ -57,10 +57,18 @@ Elasticsearch website or from our RPM repository.
+
<>
+TIP: For a step-by-step example of setting up the {stack} on your own premises, try out our tutorial: {stack-ref}/installing-stack-demo-self.html[Installing a self-managed Elastic Stack].
+
+[discrete]
+[[elasticsearch-docker-images]]
+=== Elasticsearch container images
+
+You can also run {es} inside a container.
+
+[horizontal]
`docker`::
-Images are available for running Elasticsearch as Docker containers. They may be
-downloaded from the Elastic Docker Registry.
+Docker container images may be downloaded from the Elastic Docker Registry.
+
{ref}/docker.html[Install {es} with Docker]
diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc
index 8dfbca8c63210..a30c8c313b263 100644
--- a/docs/reference/setup/install/rpm.asciidoc
+++ b/docs/reference/setup/install/rpm.asciidoc
@@ -19,6 +19,8 @@ NOTE: Elasticsearch includes a bundled version of https://openjdk.java.net[OpenJ
from the JDK maintainers (GPLv2+CE). To use your own version of Java,
see the <>
+TIP: For a step-by-step example of setting up the {stack} on your own premises, try out our tutorial: {stack-ref}/installing-stack-demo-self.html[Installing a self-managed Elastic Stack].
+
[[rpm-key]]
==== Import the Elasticsearch GPG Key
diff --git a/docs/reference/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc b/docs/reference/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc
index ceb282a3966f5..a3910675b1632 100644
--- a/docs/reference/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc
+++ b/docs/reference/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc
@@ -89,7 +89,7 @@ https://www.elastic.co/guide/en/cloud-enterprise/current/ece-manage-repositories
if you are using such a deployment.
One common failure scenario is repository corruption. This occurs most often when multiple instances of {es} write to
-the same repository location. There is a <> to fix this problem.
+the same repository location. There is a <> to fix this problem.
In the event that snapshots are failing for other reasons check the logs on the elected master node during the snapshot
execution period for more information.
@@ -163,7 +163,7 @@ Snapshots can fail for a variety reasons. If the failures are due to configurati
documentation for the repository that the automated snapshots are using.
One common failure scenario is repository corruption. This occurs most often when multiple instances of {es} write to
-the same repository location. There is a <> to fix this problem.
+the same repository location. There is a <> to fix this problem.
In the event that snapshots are failing for other reasons check the logs on the elected master node during the snapshot
execution period for more information.
diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc
index de1f9e6c7a608..64df699d33638 100644
--- a/docs/reference/troubleshooting.asciidoc
+++ b/docs/reference/troubleshooting.asciidoc
@@ -43,7 +43,7 @@ fix problems that an {es} deployment might encounter.
[[troubleshooting-snapshot]]
=== Snapshot and restore
* <>
-* <>
+* <>
* <>
[discrete]
diff --git a/docs/reference/troubleshooting/snapshot/add-repository.asciidoc b/docs/reference/troubleshooting/snapshot/add-repository.asciidoc
index dc2ce5a4bc252..0de4667bd9688 100644
--- a/docs/reference/troubleshooting/snapshot/add-repository.asciidoc
+++ b/docs/reference/troubleshooting/snapshot/add-repository.asciidoc
@@ -1,8 +1,15 @@
[[add-repository]]
-== Multiple deployments writing to the same snapshot repository
+== Troubleshooting broken repositories
-Multiple {es} deployments are writing to the same snapshot repository. {es} doesn't
-support this configuration and only one cluster is allowed to write to the same
+There are several situations where the <> might report an issue
+regarding the integrity of snapshot repositories in the cluster. This page explains
+the recommended actions for diagnosing corrupted, unknown, and invalid repositories.
+
+[[diagnosing-corrupted-repositories]]
+=== Diagnosing corrupted repositories
+
+Multiple {es} deployments are writing to the same snapshot repository. {es} doesn't
+support this configuration and only one cluster is allowed to write to the same
repository. See <> for potential side-effects of
corruption of the repository contents, which may not be resolved by the following
guide.
@@ -11,3 +18,29 @@ other deployments, and re-add (recreate) the repository in the current deploymen
include::{es-repo-dir}/tab-widgets/troubleshooting/snapshot/corrupt-repository-widget.asciidoc[]
+
+[[diagnosing-unknown-repositories]]
+=== Diagnosing unknown repositories
+
+When a snapshot repository is marked as "unknown", it means that an {es} node is
+unable to instantiate the repository due to an unknown repository type. This is
+usually caused by a missing plugin on the node. Make sure each node in the cluster
+has the required plugins by taking the following steps:
+
+1. Retrieve the affected nodes from the affected resources section of the health report.
+2. Use the <> to retrieve the plugins installed on each node, as shown in the example below.
+3. Cross-reference this with a node that works correctly to find out which plugins are missing,
+and install the missing plugins.
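For step 2, the plugins installed on each node can be listed with the nodes info API, for example:

[source,console]
--------------------------------------------------
GET /_nodes/plugins
--------------------------------------------------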
+
+
+[[diagnosing-invalid-repositories]]
+=== Diagnosing invalid repositories
+
+When an {es} node encounters an unexpected exception while trying to instantiate a snapshot
+repository, it will mark the repository as "invalid" and write a warning to the log file.
+Use the following steps to diagnose the underlying cause of this issue:
+
+1. Retrieve the affected nodes from the affected resources section of the health report.
+2. Refer to the logs of the affected node(s) and search for the repository name.
+You should be able to find log entries that contain the relevant exception.
+3. Try to resolve the errors reported.
diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle
index 5c9dd49c007b8..15a79364559a2 100644
--- a/libs/x-content/build.gradle
+++ b/libs/x-content/build.gradle
@@ -6,44 +6,17 @@
* Side Public License, v 1.
*/
-
-import org.elasticsearch.gradle.transform.UnzipTransform
-import org.elasticsearch.gradle.internal.GenerateProviderManifest
-import org.gradle.api.internal.artifacts.ArtifactAttributes
-
-import java.util.stream.Collectors
-
apply plugin: 'elasticsearch.build'
apply plugin: 'elasticsearch.publish'
+apply plugin: 'elasticsearch.embedded-providers'
-def isImplAttr = Attribute.of("is.impl", Boolean)
-
-configurations {
- providerImpl {
- attributes.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE)
- attributes.attribute(isImplAttr, true)
- }
+embeddedProviders {
+ impl 'x-content', project(':libs:elasticsearch-x-content:impl')
}
dependencies {
- registerTransform(
- UnzipTransform.class, transformSpec -> {
- transformSpec.getFrom()
- .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.JAR_TYPE)
- .attribute(isImplAttr, true)
- transformSpec.getTo()
- .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE)
- .attribute(isImplAttr, true)
- transformSpec.parameters(parameters -> {
- parameters.includeArtifactName.set(true)
- })
-
- })
-
api project(':libs:elasticsearch-core')
- providerImpl project(':libs:elasticsearch-x-content:impl')
-
testImplementation(project(":test:framework")) {
exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content'
}
@@ -66,18 +39,3 @@ tasks.named("thirdPartyAudit").configure {
tasks.named("dependencyLicenses").configure {
mapping from: /jackson-.*/, to: 'jackson'
}
-
-Directory generatedResourcesDir = layout.buildDirectory.dir('generated-resources').get()
-def generateProviderManifest = tasks.register("generateProviderManifest", GenerateProviderManifest.class) {
- manifestFile = generatedResourcesDir.file("LISTING.TXT")
- getProviderImplClasspath().from(configurations.providerImpl)
-}
-
-def generateProviderImpl = tasks.register("generateProviderImpl", Sync) {
- destinationDir = generatedResourcesDir.dir("impl").getAsFile()
- into("IMPL-JARS/x-content") {
- from(configurations.providerImpl)
- from(generateProviderManifest)
- }
-}
-sourceSets.main.output.dir(generateProviderImpl)
diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
index d63c61eea876c..41512af0f79d4 100644
--- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
+++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
@@ -976,7 +976,7 @@ public XContentBuilder value(Map map) throws IOException {
return map(map);
}
- private XContentBuilder value(ToXContent value, ToXContent.Params params) throws IOException {
+ public XContentBuilder value(ToXContent value, ToXContent.Params params) throws IOException {
if (value == null) {
return nullValue();
}
diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java
index b1db2f8a7d3a1..97c75689fe5dc 100644
--- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java
+++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java
@@ -70,7 +70,7 @@ public void setUp() throws Exception {
afterIndex = randomAlphaOfLength(12).toLowerCase(Locale.ROOT);
startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(START_TIME);
endTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(END_TIME);
- numTimeSeries = 5_000;
+ numTimeSeries = 500;
// NOTE: we need to use few dimensions to be able to index documents in an index created before introducing TSID hashing
numDimensions = randomIntBetween(10, 20);
@@ -275,20 +275,14 @@ public String toString() {
@Override
public Iterator iterator() {
- return new TimeSeriesIterator(this.dataset.entrySet());
+ return new TimeSeriesIterator(this.dataset.entrySet().iterator());
}
public int size() {
return this.dataset.size();
}
- static class TimeSeriesIterator implements Iterator {
-
- private final Iterator> it;
-
- TimeSeriesIterator(final Set> entries) {
- this.it = entries.iterator();
- }
+ record TimeSeriesIterator(Iterator> it) implements Iterator {
@Override
public boolean hasNext() {
diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java
index c17cc004e25b5..745585901311a 100644
--- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java
+++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java
@@ -205,18 +205,17 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
@Override
protected InternalBucket reduceBucket(List buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
+ assert buckets.isEmpty() == false;
InternalBucket reduced = null;
- List aggregationsList = new ArrayList<>(buckets.size());
for (InternalBucket bucket : buckets) {
if (reduced == null) {
reduced = new InternalBucket(bucket.key, bucket.docCount, bucket.aggregations);
} else {
reduced.docCount += bucket.docCount;
}
- aggregationsList.add(bucket.aggregations);
}
- reduced.aggregations = InternalAggregations.reduce(aggregationsList, context);
+ final List aggregations = new BucketAggregationList<>(buckets);
+ reduced.aggregations = InternalAggregations.reduce(aggregations, context);
return reduced;
}
diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java
index de36a9721fe38..78f6d67b0f748 100644
--- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java
+++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java
@@ -410,14 +410,13 @@ private List mergeBuckets(
@Override
protected Bucket reduceBucket(List buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
- List aggregations = new ArrayList<>(buckets.size());
+ assert buckets.isEmpty() == false;
long docCount = 0;
for (Bucket bucket : buckets) {
docCount += bucket.docCount;
- aggregations.add(bucket.getAggregations());
}
- InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
+ final List aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return new InternalAutoDateHistogram.Bucket(buckets.get(0).key, docCount, format, aggs);
}
diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java
index 67a7773fd01bb..725bd5673bccf 100644
--- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java
+++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java
@@ -255,16 +255,15 @@ public InternalBucket createBucket(InternalAggregations aggregations, InternalBu
@Override
protected InternalBucket reduceBucket(List buckets, AggregationReduceContext context) {
InternalTimeSeries.InternalBucket reduced = null;
- List aggregationsList = new ArrayList<>(buckets.size());
for (InternalTimeSeries.InternalBucket bucket : buckets) {
if (reduced == null) {
reduced = new InternalTimeSeries.InternalBucket(bucket.key, bucket.docCount, bucket.aggregations, bucket.keyed);
} else {
reduced.docCount += bucket.docCount;
}
- aggregationsList.add(bucket.aggregations);
}
- reduced.aggregations = InternalAggregations.reduce(aggregationsList, context);
+ final List aggregations = new BucketAggregationList<>(buckets);
+ reduced.aggregations = InternalAggregations.reduce(aggregations, context);
return reduced;
}
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
index 88359d32a628c..0bbaca00d1e2e 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
@@ -44,7 +44,6 @@ public void addClusterSettingsListeners(ClusterService clusterService, APMTeleme
clusterSettings.addSettingsUpdateConsumer(TELEMETRY_TRACING_ENABLED_SETTING, enabled -> {
apmTracer.setEnabled(enabled);
- this.setAgentSetting("instrument", Boolean.toString(enabled));
// The agent records data other than spans, e.g. JVM metrics, so we toggle this setting in order to
// minimise its impact to a running Elasticsearch.
boolean recording = enabled || clusterSettings.get(TELEMETRY_METRICS_ENABLED_SETTING);
@@ -73,7 +72,6 @@ public void initAgentSystemProperties(Settings settings) {
boolean metrics = TELEMETRY_METRICS_ENABLED_SETTING.get(settings);
this.setAgentSetting("recording", Boolean.toString(tracing || metrics));
- this.setAgentSetting("instrument", Boolean.toString(tracing));
// Apply values from the settings in the cluster state
APM_AGENT_SETTINGS.getAsMap(settings).forEach(this::setAgentSetting);
}
@@ -120,7 +118,8 @@ public void setAgentSetting(String key, String value) {
// Core:
// forbid 'enabled', must remain enabled to dynamically enable tracing / metrics
- // forbid 'recording' / 'instrument', controlled by 'telemetry.metrics.enabled' / 'telemetry.tracing.enabled'
+ // forbid 'recording', controlled by 'telemetry.metrics.enabled' / 'telemetry.tracing.enabled'
+ // forbid 'instrument', automatic instrumentation can cause issues
"service_name",
"service_node_name",
// forbid 'service_version', forced by APMJvmOptions
diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java
index d7ae93aded3de..f075f4fc39cfd 100644
--- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java
+++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java
@@ -60,13 +60,11 @@ public void testEnableTracing() {
apmAgentSettings.initAgentSystemProperties(update);
verify(apmAgentSettings).setAgentSetting("recording", "true");
- verify(apmAgentSettings).setAgentSetting("instrument", "true");
clearInvocations(apmAgentSettings);
Settings initial = Settings.builder().put(update).put(TELEMETRY_TRACING_ENABLED_SETTING.getKey(), false).build();
triggerUpdateConsumer(initial, update);
verify(apmAgentSettings).setAgentSetting("recording", "true");
- verify(apmAgentSettings).setAgentSetting("instrument", "true");
verify(apmTelemetryProvider.getTracer()).setEnabled(true);
}
}
@@ -76,7 +74,6 @@ public void testEnableTracingUsingLegacySetting() {
apmAgentSettings.initAgentSystemProperties(settings);
verify(apmAgentSettings).setAgentSetting("recording", "true");
- verify(apmAgentSettings).setAgentSetting("instrument", "true");
}
public void testEnableMetrics() {
@@ -90,7 +87,6 @@ public void testEnableMetrics() {
apmAgentSettings.initAgentSystemProperties(update);
verify(apmAgentSettings).setAgentSetting("recording", "true");
- verify(apmAgentSettings).setAgentSetting("instrument", Boolean.toString(tracingEnabled));
clearInvocations(apmAgentSettings);
Settings initial = Settings.builder().put(update).put(TELEMETRY_METRICS_ENABLED_SETTING.getKey(), false).build();
@@ -114,13 +110,11 @@ public void testDisableTracing() {
apmAgentSettings.initAgentSystemProperties(update);
verify(apmAgentSettings).setAgentSetting("recording", Boolean.toString(metricsEnabled));
- verify(apmAgentSettings).setAgentSetting("instrument", "false");
clearInvocations(apmAgentSettings);
Settings initial = Settings.builder().put(update).put(TELEMETRY_TRACING_ENABLED_SETTING.getKey(), true).build();
triggerUpdateConsumer(initial, update);
verify(apmAgentSettings).setAgentSetting("recording", Boolean.toString(metricsEnabled));
- verify(apmAgentSettings).setAgentSetting("instrument", "false");
verify(apmTelemetryProvider.getTracer()).setEnabled(false);
}
}
@@ -130,7 +124,6 @@ public void testDisableTracingUsingLegacySetting() {
apmAgentSettings.initAgentSystemProperties(settings);
verify(apmAgentSettings).setAgentSetting("recording", "false");
- verify(apmAgentSettings).setAgentSetting("instrument", "false");
}
public void testDisableMetrics() {
@@ -144,7 +137,6 @@ public void testDisableMetrics() {
apmAgentSettings.initAgentSystemProperties(update);
verify(apmAgentSettings).setAgentSetting("recording", Boolean.toString(tracingEnabled));
- verify(apmAgentSettings).setAgentSetting("instrument", Boolean.toString(tracingEnabled));
clearInvocations(apmAgentSettings);
Settings initial = Settings.builder().put(update).put(TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LazyRolloverDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LazyRolloverDataStreamIT.java
index d89dbc346c7e0..978dffb965ac8 100644
--- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LazyRolloverDataStreamIT.java
+++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LazyRolloverDataStreamIT.java
@@ -11,7 +11,18 @@
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
-
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.FeatureFlag;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
+import org.elasticsearch.test.cluster.util.resource.Resource;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.junit.Before;
+import org.junit.ClassRule;
+
+import java.io.IOException;
import java.util.List;
import java.util.Map;
@@ -21,10 +32,51 @@
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.startsWith;
-public class LazyRolloverDataStreamIT extends DisabledSecurityDataStreamTestCase {
+public class LazyRolloverDataStreamIT extends ESRestTestCase {
+
+ private static final String PASSWORD = "secret-test-password";
+ private static final String DATA_STREAM_NAME = "lazy-ds";
+
+ @ClassRule
+ public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+ .distribution(DistributionType.DEFAULT)
+ .feature(FeatureFlag.FAILURE_STORE_ENABLED)
+ .setting("xpack.watcher.enabled", "false")
+ .setting("xpack.ml.enabled", "false")
+ .setting("xpack.security.enabled", "true")
+ .setting("xpack.security.transport.ssl.enabled", "false")
+ .setting("xpack.security.http.ssl.enabled", "false")
+ .user("test_admin", PASSWORD, "superuser", false)
+ .user("test_simple_user", PASSWORD, "not_privileged", false)
+ .rolesFile(Resource.fromClasspath("roles.yml"))
+ .build();
+
+ @Override
+ protected String getTestRestCluster() {
+ return cluster.getHttpAddresses();
+ }
- @SuppressWarnings("unchecked")
- public void testLazyRollover() throws Exception {
+ @Override
+ protected Settings restClientSettings() {
+ // If this test is running in a test framework that handles its own authorization, we don't want to overwrite it.
+ if (super.restClientSettings().keySet().contains(ThreadContext.PREFIX + ".Authorization")) {
+ return super.restClientSettings();
+ } else {
+ // Note: This user is assigned the role "manage_data_stream_lifecycle". That role is defined in roles.yml.
+ String token = basicAuthHeaderValue("test_simple_user", new SecureString(PASSWORD.toCharArray()));
+ return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
+ }
+ }
+
+ @Override
+ protected Settings restAdminSettings() {
+ String authKey = ThreadContext.PREFIX + ".Authorization";
+ String token = basicAuthHeaderValue("test_admin", new SecureString(PASSWORD.toCharArray()));
+ return Settings.builder().put(authKey, token).build();
+ }
+
+ @Before
+ public void setUpDataStreamAsAdmin() throws IOException {
Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/lazy-ds-template");
putComposableIndexTemplateRequest.setJsonEntity("""
{
@@ -32,15 +84,28 @@ public void testLazyRollover() throws Exception {
"data_stream": {}
}
""");
- assertOK(client().performRequest(putComposableIndexTemplateRequest));
-
- String dataStreamName = "lazy-ds";
+ assertOK(adminClient().performRequest(putComposableIndexTemplateRequest));
+ assertOK(adminClient().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME)));
+ }
- Request createDocRequest = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true");
+ @SuppressWarnings("unchecked")
+ public void testLazyRollover() throws Exception {
+ Request createDocRequest = new Request("POST", "/" + DATA_STREAM_NAME + "/_doc?refresh=true");
createDocRequest.setJsonEntity("{ \"@timestamp\": \"2020-10-22\", \"a\": 1 }");
assertOK(client().performRequest(createDocRequest));
- final Response rolloverResponse = client().performRequest(new Request("POST", "/" + dataStreamName + "/_rollover?lazy"));
+ {
+ ResponseException responseError = expectThrows(
+ ResponseException.class,
+ () -> client().performRequest(new Request("POST", "/" + DATA_STREAM_NAME + "/_rollover?lazy"))
+ );
+ assertThat(responseError.getResponse().getStatusLine().getStatusCode(), is(403));
+ assertThat(
+ responseError.getMessage(),
+ containsString("action [indices:admin/rollover] is unauthorized for user [test_simple_user]")
+ );
+ }
+ final Response rolloverResponse = adminClient().performRequest(new Request("POST", "/" + DATA_STREAM_NAME + "/_rollover?lazy"));
Map rolloverResponseMap = entityAsMap(rolloverResponse);
assertThat((String) rolloverResponseMap.get("old_index"), startsWith(".ds-lazy-ds-"));
assertThat((String) rolloverResponseMap.get("old_index"), endsWith("-000001"));
@@ -53,25 +118,25 @@ public void testLazyRollover() throws Exception {
assertThat(rolloverResponseMap.get("conditions"), equalTo(Map.of()));
{
- final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + dataStreamName));
+ final Response dataStreamResponse = adminClient().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME));
List
*/
public IndexRequest source(XContentType xContentType, Object... source) {
+ return source(getXContentBuilder(xContentType, source));
+ }
+
+ /**
+ * Returns an XContentBuilder for the given xContentType and source array
+ *
+ * Note: the number of objects passed to this method as varargs must be an even
+ * number. Also the first argument in each pair (the field name) must have a
+ * valid String representation.
+ *
+ */
+ public static XContentBuilder getXContentBuilder(XContentType xContentType, Object... source) {
if (source.length % 2 != 0) {
throw new IllegalArgumentException("The number of object passed must be even but was [" + source.length + "]");
}
@@ -489,11 +508,14 @@ public IndexRequest source(XContentType xContentType, Object... source) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(xContentType);
builder.startObject();
- for (int i = 0; i < source.length; i++) {
- builder.field(source[i++].toString(), source[i]);
+ // This for loop increments by 2 because the source array contains adjacent key/value pairs:
+ for (int i = 0; i < source.length; i = i + 2) {
+ String field = source[i].toString();
+ Object value = source[i + 1];
+ builder.field(field, value);
}
builder.endObject();
- return source(builder);
+ return builder;
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate", e);
}
@@ -821,7 +843,25 @@ public IndexRequest setRequireDataStream(boolean requireDataStream) {
@Override
public Index getConcreteWriteIndex(IndexAbstraction ia, Metadata metadata) {
- return ia.getWriteIndex(this, metadata);
+ if (DataStream.isFailureStoreEnabled() && writeToFailureStore) {
+ if (ia.isDataStreamRelated() == false) {
+ throw new ElasticsearchException(
+ "Attempting to write a document to a failure store but the targeted index is not a data stream"
+ );
+ }
+ // Resolve write index and get parent data stream to handle the case of dealing with an alias
+ String defaultWriteIndexName = ia.getWriteIndex().getName();
+ DataStream dataStream = metadata.getIndicesLookup().get(defaultWriteIndexName).getParentDataStream();
+ if (dataStream.getFailureIndices().size() < 1) {
+ throw new ElasticsearchException(
+ "Attempting to write a document to a failure store but the target data stream does not have one enabled"
+ );
+ }
+ return dataStream.getFailureIndices().get(dataStream.getFailureIndices().size() - 1);
+ } else {
+ // Resolve as normal
+ return ia.getWriteIndex(this, metadata);
+ }
}
@Override
@@ -834,6 +874,15 @@ public IndexRequest setRequireAlias(boolean requireAlias) {
return this;
}
+ public boolean isWriteToFailureStore() {
+ return writeToFailureStore;
+ }
+
+ public IndexRequest setWriteToFailureStore(boolean writeToFailureStore) {
+ this.writeToFailureStore = writeToFailureStore;
+ return this;
+ }
+
public IndexRequest setListExecutedPipelines(boolean listExecutedPipelines) {
this.listExecutedPipelines = listExecutedPipelines;
return this;
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
index b8faf39514cbe..0cb04fbdba1a6 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
@@ -8,17 +8,23 @@
package org.elasticsearch.action.index;
+import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
+import org.elasticsearch.client.internal.Requests;
+import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentType;
+import java.io.IOException;
import java.util.Map;
/**
@@ -27,13 +33,30 @@
public class IndexRequestBuilder extends ReplicationRequestBuilder
implements
WriteRequestBuilder {
+ private String id = null;
+
+ private BytesReference sourceBytesReference;
+ private XContentType sourceContentType;
+
+ private String pipeline;
+ private Boolean requireAlias;
+ private Boolean requireDataStream;
+ private String routing;
+ private WriteRequest.RefreshPolicy refreshPolicy;
+ private Long ifSeqNo;
+ private Long ifPrimaryTerm;
+ private DocWriteRequest.OpType opType;
+ private Boolean create;
+ private Long version;
+ private VersionType versionType;
public IndexRequestBuilder(ElasticsearchClient client) {
- super(client, TransportIndexAction.TYPE, new IndexRequest());
+ this(client, null);
}
public IndexRequestBuilder(ElasticsearchClient client, @Nullable String index) {
- super(client, TransportIndexAction.TYPE, new IndexRequest(index));
+ super(client, TransportIndexAction.TYPE);
+ setIndex(index);
}
/**
@@ -41,7 +64,7 @@ public IndexRequestBuilder(ElasticsearchClient client, @Nullable String index) {
* generated.
*/
public IndexRequestBuilder setId(String id) {
- request.id(id);
+ this.id = id;
return this;
}
@@ -50,7 +73,7 @@ public IndexRequestBuilder setId(String id) {
* and not the id.
*/
public IndexRequestBuilder setRouting(String routing) {
- request.routing(routing);
+ this.routing = routing;
return this;
}
@@ -58,7 +81,8 @@ public IndexRequestBuilder setRouting(String routing) {
* Sets the source.
*/
public IndexRequestBuilder setSource(BytesReference source, XContentType xContentType) {
- request.source(source, xContentType);
+ this.sourceBytesReference = source;
+ this.sourceContentType = xContentType;
return this;
}
@@ -68,8 +92,7 @@ public IndexRequestBuilder setSource(BytesReference source, XContentType xConten
* @param source The map to index
*/
public IndexRequestBuilder setSource(Map source) {
- request.source(source);
- return this;
+ return setSource(source, Requests.INDEX_CONTENT_TYPE);
}
/**
@@ -78,8 +101,13 @@ public IndexRequestBuilder setSource(Map source) {
* @param source The map to index
*/
public IndexRequestBuilder setSource(Map source, XContentType contentType) {
- request.source(source, contentType);
- return this;
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(source);
+ return setSource(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate", e);
+ }
}
/**
@@ -89,7 +117,8 @@ public IndexRequestBuilder setSource(Map source, XContentType content
* or using the {@link #setSource(byte[], XContentType)}.
*/
public IndexRequestBuilder setSource(String source, XContentType xContentType) {
- request.source(source, xContentType);
+ this.sourceBytesReference = new BytesArray(source);
+ this.sourceContentType = xContentType;
return this;
}
@@ -97,7 +126,8 @@ public IndexRequestBuilder setSource(String source, XContentType xContentType) {
* Sets the content source to index.
*/
public IndexRequestBuilder setSource(XContentBuilder sourceBuilder) {
- request.source(sourceBuilder);
+ this.sourceBytesReference = BytesReference.bytes(sourceBuilder);
+ this.sourceContentType = sourceBuilder.contentType();
return this;
}
@@ -105,8 +135,7 @@ public IndexRequestBuilder setSource(XContentBuilder sourceBuilder) {
* Sets the document to index in bytes form.
*/
public IndexRequestBuilder setSource(byte[] source, XContentType xContentType) {
- request.source(source, xContentType);
- return this;
+ return setSource(source, 0, source.length, xContentType);
}
/**
@@ -119,7 +148,8 @@ public IndexRequestBuilder setSource(byte[] source, XContentType xContentType) {
* @param xContentType The type/format of the source
*/
public IndexRequestBuilder setSource(byte[] source, int offset, int length, XContentType xContentType) {
- request.source(source, offset, length, xContentType);
+ this.sourceBytesReference = new BytesArray(source, offset, length);
+ this.sourceContentType = xContentType;
return this;
}
@@ -132,8 +162,7 @@ public IndexRequestBuilder setSource(byte[] source, int offset, int length, XCon
*
*/
public IndexRequestBuilder setSource(Object... source) {
- request.source(source);
- return this;
+ return setSource(Requests.INDEX_CONTENT_TYPE, source);
}
/**
@@ -145,15 +174,14 @@ public IndexRequestBuilder setSource(Object... source) {
*
*/
public IndexRequestBuilder setSource(XContentType xContentType, Object... source) {
- request.source(xContentType, source);
- return this;
+ return setSource(IndexRequest.getXContentBuilder(xContentType, source));
}
/**
* Sets the type of operation to perform.
*/
public IndexRequestBuilder setOpType(DocWriteRequest.OpType opType) {
- request.opType(opType);
+ this.opType = opType;
return this;
}
@@ -161,7 +189,7 @@ public IndexRequestBuilder setOpType(DocWriteRequest.OpType opType) {
* Set to {@code true} to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}.
*/
public IndexRequestBuilder setCreate(boolean create) {
- request.create(create);
+ this.create = create;
return this;
}
@@ -170,7 +198,7 @@ public IndexRequestBuilder setCreate(boolean create) {
* version exists and no changes happened on the doc since then.
*/
public IndexRequestBuilder setVersion(long version) {
- request.version(version);
+ this.version = version;
return this;
}
@@ -178,7 +206,7 @@ public IndexRequestBuilder setVersion(long version) {
* Sets the versioning type. Defaults to {@link VersionType#INTERNAL}.
*/
public IndexRequestBuilder setVersionType(VersionType versionType) {
- request.versionType(versionType);
+ this.versionType = versionType;
return this;
}
@@ -190,7 +218,7 @@ public IndexRequestBuilder setVersionType(VersionType versionType) {
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public IndexRequestBuilder setIfSeqNo(long seqNo) {
- request.setIfSeqNo(seqNo);
+ this.ifSeqNo = seqNo;
return this;
}
@@ -202,7 +230,7 @@ public IndexRequestBuilder setIfSeqNo(long seqNo) {
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public IndexRequestBuilder setIfPrimaryTerm(long term) {
- request.setIfPrimaryTerm(term);
+ this.ifPrimaryTerm = term;
return this;
}
@@ -210,7 +238,7 @@ public IndexRequestBuilder setIfPrimaryTerm(long term) {
* Sets the ingest pipeline to be executed before indexing the document
*/
public IndexRequestBuilder setPipeline(String pipeline) {
- request.setPipeline(pipeline);
+ this.pipeline = pipeline;
return this;
}
@@ -218,7 +246,7 @@ public IndexRequestBuilder setPipeline(String pipeline) {
* Sets the require_alias flag
*/
public IndexRequestBuilder setRequireAlias(boolean requireAlias) {
- request.setRequireAlias(requireAlias);
+ this.requireAlias = requireAlias;
return this;
}
@@ -226,7 +254,64 @@ public IndexRequestBuilder setRequireAlias(boolean requireAlias) {
* Sets the require_data_stream flag
*/
public IndexRequestBuilder setRequireDataStream(boolean requireDataStream) {
- request.setRequireDataStream(requireDataStream);
+ this.requireDataStream = requireDataStream;
return this;
}
+
+ public IndexRequestBuilder setRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) {
+ this.refreshPolicy = refreshPolicy;
+ return this;
+ }
+
+ public IndexRequestBuilder setRefreshPolicy(String refreshPolicy) {
+ this.refreshPolicy = WriteRequest.RefreshPolicy.parse(refreshPolicy);
+ return this;
+ }
+
+ @Override
+ public IndexRequest request() {
+ IndexRequest request = new IndexRequest();
+ super.apply(request);
+ request.id(id);
+ if (sourceBytesReference != null && sourceContentType != null) {
+ request.source(sourceBytesReference, sourceContentType);
+ }
+ if (pipeline != null) {
+ request.setPipeline(pipeline);
+ }
+ if (routing != null) {
+ request.routing(routing);
+ }
+ if (refreshPolicy != null) {
+ request.setRefreshPolicy(refreshPolicy);
+ }
+ if (ifSeqNo != null) {
+ request.setIfSeqNo(ifSeqNo);
+ }
+ if (ifPrimaryTerm != null) {
+ request.setIfPrimaryTerm(ifPrimaryTerm);
+ }
+ if (requireAlias != null) {
+ request.setRequireAlias(requireAlias);
+ }
+ if (requireDataStream != null) {
+ request.setRequireDataStream(requireDataStream);
+ }
+ if (opType != null) {
+ request.opType(opType);
+ }
+ if (create != null) {
+ request.create(create);
+ }
+ if (version != null) {
+ request.version(version);
+ }
+ if (versionType != null) {
+ request.versionType(versionType);
+ }
+ return request;
+ }
}
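With the lazy builder above, setters only record state and request() materializes a fresh, fully populated IndexRequest on each call. A rough usage sketch (the client, index name, id and source values are illustrative, not part of this change):

    IndexRequest indexRequest = new IndexRequestBuilder(client, "my-index")   // client: any existing ElasticsearchClient
        .setId("1")
        .setSource(Map.of("field", "value"), XContentType.JSON)
        .setOpType(DocWriteRequest.OpType.CREATE)
        .request();                                                            // the IndexRequest is only built here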
diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
index 7741c1483f69a..149cdb9206b34 100644
--- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
@@ -121,7 +121,7 @@ private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilde
}
}
if (options.getFetchFields() != null) {
- options.getFetchFields().forEach(ff -> groupSource.fetchField(ff));
+ options.getFetchFields().forEach(groupSource::fetchField);
}
if (options.getDocValueFields() != null) {
options.getDocValueFields().forEach(ff -> groupSource.docValueField(ff.field, ff.format));
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java
index a3763bf101b15..b2ccdc610a4c2 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java
@@ -79,33 +79,14 @@ protected void closeInternal() {
}) : ALWAYS_REFERENCED;
}
- public final boolean timedOut() {
- return this.timedOut;
- }
-
- public final Boolean terminatedEarly() {
- return this.terminatedEarly;
- }
-
public final SearchHits hits() {
return hits;
}
- public final InternalAggregations aggregations() {
- return aggregations;
- }
-
public final Suggest suggest() {
return suggest;
}
- /**
- * Returns the number of reduce phases applied to obtain this search response
- */
- public final int getNumReducePhases() {
- return numReducePhases;
- }
-
/**
* Returns the profile results for this search response (including all shards).
* An empty map is returned if profiling was not enabled
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java
index 0616a99fc5dd0..62b39dd675387 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java
@@ -203,7 +203,7 @@ synchronized ShardSearchFailure[] buildShardFailures() { // pkg private for test
if (shardFailures.isEmpty()) {
return ShardSearchFailure.EMPTY_ARRAY;
}
- return shardFailures.toArray(new ShardSearchFailure[shardFailures.size()]);
+ return shardFailures.toArray(ShardSearchFailure.EMPTY_ARRAY);
}
// we do our best to return the shard failures, but its ok if its not fully concurrently safe
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
index 9fb0c87c78eb7..d0ae8d1ccb3f1 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
@@ -194,7 +194,7 @@ public void sendExecuteDfs(
DFS_ACTION_NAME,
request,
task,
- new ConnectionCountingHandler<>(listener, DfsSearchResult::new, clientConnections, connection.getNode().getId())
+ new ConnectionCountingHandler<>(listener, DfsSearchResult::new, connection)
);
}
@@ -216,7 +216,7 @@ public void sendExecuteQuery(
QUERY_ACTION_NAME,
request,
task,
- new ConnectionCountingHandler<>(handler, reader, clientConnections, connection.getNode().getId())
+ new ConnectionCountingHandler<>(handler, reader, connection)
);
}
@@ -231,7 +231,7 @@ public void sendExecuteQuery(
QUERY_ID_ACTION_NAME,
request,
task,
- new ConnectionCountingHandler<>(listener, QuerySearchResult::new, clientConnections, connection.getNode().getId())
+ new ConnectionCountingHandler<>(listener, QuerySearchResult::new, connection)
);
}
@@ -246,7 +246,7 @@ public void sendExecuteScrollQuery(
QUERY_SCROLL_ACTION_NAME,
request,
task,
- new ConnectionCountingHandler<>(listener, ScrollQuerySearchResult::new, clientConnections, connection.getNode().getId())
+ new ConnectionCountingHandler<>(listener, ScrollQuerySearchResult::new, connection)
);
}
@@ -261,7 +261,7 @@ public void sendExecuteScrollFetch(
QUERY_FETCH_SCROLL_ACTION_NAME,
request,
task,
- new ConnectionCountingHandler<>(listener, ScrollQueryFetchSearchResult::new, clientConnections, connection.getNode().getId())
+ new ConnectionCountingHandler<>(listener, ScrollQueryFetchSearchResult::new, connection)
);
}
@@ -295,7 +295,7 @@ private void sendExecuteFetch(
action,
request,
task,
- new ConnectionCountingHandler<>(listener, FetchSearchResult::new, clientConnections, connection.getNode().getId())
+ new ConnectionCountingHandler<>(listener, FetchSearchResult::new, connection)
);
}
@@ -309,7 +309,7 @@ void sendExecuteMultiSearch(final MultiSearchRequest request, SearchTask task, f
TransportMultiSearchAction.TYPE.name(),
request,
task,
- new ConnectionCountingHandler<>(listener, MultiSearchResponse::new, clientConnections, connection.getNode().getId())
+ new ConnectionCountingHandler<>(listener, MultiSearchResponse::new, connection)
);
}
@@ -413,14 +413,15 @@ public static void registerRequestHandler(
SearchService searchService,
SearchTransportAPMMetrics searchTransportMetrics
) {
+ final TransportRequestHandler freeContextHandler = (request, channel, task) -> {
+ boolean freed = searchService.freeReaderContext(request.id());
+ channel.sendResponse(new SearchFreeContextResponse(freed));
+ };
transportService.registerRequestHandler(
FREE_CONTEXT_SCROLL_ACTION_NAME,
EsExecutors.DIRECT_EXECUTOR_SERVICE,
ScrollFreeContextRequest::new,
- instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> {
- boolean freed = searchService.freeReaderContext(request.id());
- channel.sendResponse(new SearchFreeContextResponse(freed));
- })
+ instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler)
);
TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, false, SearchFreeContextResponse::new);
@@ -428,10 +429,7 @@ public static void registerRequestHandler(
FREE_CONTEXT_ACTION_NAME,
EsExecutors.DIRECT_EXECUTOR_SERVICE,
SearchFreeContextRequest::new,
- instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> {
- boolean freed = searchService.freeReaderContext(request.id());
- channel.sendResponse(new SearchFreeContextResponse(freed));
- })
+ instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler)
);
TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, false, SearchFreeContextResponse::new);
@@ -541,20 +539,13 @@ public static void registerRequestHandler(
);
TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, true, ScrollQueryFetchSearchResult::new);
+ final TransportRequestHandler shardFetchRequestHandler = (request, channel, task) -> searchService
+ .executeFetchPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel));
transportService.registerRequestHandler(
FETCH_ID_SCROLL_ACTION_NAME,
EsExecutors.DIRECT_EXECUTOR_SERVICE,
ShardFetchRequest::new,
- instrumentedHandler(
- FETCH_ID_SCROLL_ACTION_METRIC,
- transportService,
- searchTransportMetrics,
- (request, channel, task) -> searchService.executeFetchPhase(
- request,
- (SearchShardTask) task,
- new ChannelActionListener<>(channel)
- )
- )
+ instrumentedHandler(FETCH_ID_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, shardFetchRequestHandler)
);
TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, true, FetchSearchResult::new);
@@ -564,16 +555,7 @@ public static void registerRequestHandler(
true,
true,
ShardFetchSearchRequest::new,
- instrumentedHandler(
- FETCH_ID_ACTION_METRIC,
- transportService,
- searchTransportMetrics,
- (request, channel, task) -> searchService.executeFetchPhase(
- request,
- (SearchShardTask) task,
- new ChannelActionListener<>(channel)
- )
- )
+ instrumentedHandler(FETCH_ID_ACTION_METRIC, transportService, searchTransportMetrics, shardFetchRequestHandler)
);
TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new);
@@ -597,13 +579,16 @@ private static TransportRequestHandler transportRequestHandler
) {
+ var threadPool = transportService.getThreadPool();
+ var latencies = searchTransportMetrics.getActionLatencies();
+ Map<String, Object> attributes = Map.of(ACTION_ATTRIBUTE_NAME, actionQualifier);
return (request, channel, task) -> {
- var startTime = transportService.getThreadPool().relativeTimeInMillis();
+ var startTime = threadPool.relativeTimeInMillis();
try {
transportRequestHandler.messageReceived(request, channel, task);
} finally {
- var elapsedTime = transportService.getThreadPool().relativeTimeInMillis() - startTime;
- searchTransportMetrics.getActionLatencies().record(elapsedTime, Map.of(ACTION_ATTRIBUTE_NAME, actionQualifier));
+ var elapsedTime = threadPool.relativeTimeInMillis() - startTime;
+ latencies.record(elapsedTime, attributes);
}
};
}
@@ -624,19 +609,16 @@ public Transport.Connection getConnection(@Nullable String clusterAlias, Discove
}
}
- static final class ConnectionCountingHandler<Response extends TransportResponse> extends ActionListenerResponseHandler<Response> {
- private final Map<String, Long> clientConnections;
+ private final class ConnectionCountingHandler<Response extends TransportResponse> extends ActionListenerResponseHandler<Response> {
private final String nodeId;
ConnectionCountingHandler(
final ActionListener<? super Response> listener,
final Writeable.Reader<Response> responseReader,
- final Map<String, Long> clientConnections,
- final String nodeId
+ final Transport.Connection connection
) {
super(listener, responseReader, TransportResponseHandler.TRANSPORT_WORKER);
- this.clientConnections = clientConnections;
- this.nodeId = nodeId;
+ this.nodeId = connection.getNode().getId();
// Increment the number of connections for this node by one
clientConnections.compute(nodeId, (id, conns) -> conns == null ? 1 : conns + 1);
}
@@ -644,27 +626,26 @@ static final class ConnectionCountingHandler
@Override
public void handleResponse(Response response) {
super.handleResponse(response);
- // Decrement the number of connections or remove it entirely if there are no more connections
- // We need to remove the entry here so we don't leak when nodes go away forever
- assert assertNodePresent();
- clientConnections.computeIfPresent(nodeId, (id, conns) -> conns.longValue() == 1 ? null : conns - 1);
+ decConnectionCount();
}
@Override
public void handleException(TransportException e) {
super.handleException(e);
- // Decrement the number of connections or remove it entirely if there are no more connections
- // We need to remove the entry here so we don't leak when nodes go away forever
+ decConnectionCount();
+ }
+
+ // Decrement the number of connections or remove it entirely if there are no more connections
+ // We need to remove the entry here so we don't leak when nodes go away forever
+ private void decConnectionCount() {
assert assertNodePresent();
- clientConnections.computeIfPresent(nodeId, (id, conns) -> conns.longValue() == 1 ? null : conns - 1);
+ clientConnections.computeIfPresent(nodeId, (id, conns) -> conns == 1 ? null : conns - 1);
}
private boolean assertNodePresent() {
- clientConnections.compute(nodeId, (id, conns) -> {
- assert conns != null : "number of connections for " + id + " is null, but should be an integer";
- assert conns >= 1 : "number of connections for " + id + " should be >= 1 but was " + conns;
- return conns;
- });
+ var conns = clientConnections.get(nodeId);
+ assert conns != null : "number of connections for " + nodeId + " is null, but should be an integer";
+ assert conns >= 1 : "number of connections for " + nodeId + " should be >= 1 but was " + conns;
// Always return true, there is additional asserting here, the boolean is just so this
// can be skipped when assertions are not enabled
return true;
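The refactored ConnectionCountingHandler keeps the same counting-map pattern, now deriving the node id from the connection. A minimal sketch of that pattern (the map and node id are placeholders): increment when a request is sent, decrement when it completes, and drop the entry once the count reaches zero so nodes that leave the cluster forever do not leak entries.

    Map<String, Long> clientConnections = new ConcurrentHashMap<>();
    String nodeId = "node-1";
    clientConnections.compute(nodeId, (id, conns) -> conns == null ? 1 : conns + 1);           // on send
    clientConnections.computeIfPresent(nodeId, (id, conns) -> conns == 1 ? null : conns - 1);  // on response or failure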
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index 53ed6853fc08c..d80322b2954c6 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -360,7 +360,7 @@ void executeRequest(
localIndices,
remoteClusterIndices,
true,
- alias -> remoteClusterService.isSkipUnavailable(alias)
+ remoteClusterService::isSkipUnavailable
);
if (localIndices == null) {
// Notify the progress listener that a CCS with minimize_roundtrips is happening remote-only (no local shards)
@@ -395,7 +395,7 @@ void executeRequest(
localIndices,
remoteClusterIndices,
false,
- alias -> remoteClusterService.isSkipUnavailable(alias)
+ remoteClusterService::isSkipUnavailable
);
// TODO: pass parentTaskId
collectSearchShards(
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java
index a4d5e07103df3..8eb82af2091cd 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java
@@ -8,7 +8,7 @@
package org.elasticsearch.action.support.replication;
-import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionRequestLazyBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.support.ActiveShardCount;
@@ -18,18 +18,23 @@
public abstract class ReplicationRequestBuilder<
Request extends ReplicationRequest<Request>,
Response extends ActionResponse,
- RequestBuilder extends ReplicationRequestBuilder<Request, Response, RequestBuilder>> extends ActionRequestBuilder<Request, Response> {
+ RequestBuilder extends ReplicationRequestBuilder<Request, Response, RequestBuilder>> extends ActionRequestLazyBuilder<
+ Request,
+ Response> {
+ private String index;
+ private TimeValue timeout;
+ private ActiveShardCount waitForActiveShards;
- protected ReplicationRequestBuilder(ElasticsearchClient client, ActionType<Response> action, Request request) {
- super(client, action, request);
+ protected ReplicationRequestBuilder(ElasticsearchClient client, ActionType<Response> action) {
+ super(client, action);
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
@SuppressWarnings("unchecked")
- public final RequestBuilder setTimeout(TimeValue timeout) {
- request.timeout(timeout);
+ public RequestBuilder setTimeout(TimeValue timeout) {
+ this.timeout = timeout;
return (RequestBuilder) this;
}
@@ -37,24 +42,28 @@ public final RequestBuilder setTimeout(TimeValue timeout) {
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
@SuppressWarnings("unchecked")
- public final RequestBuilder setTimeout(String timeout) {
- request.timeout(timeout);
+ public RequestBuilder setTimeout(String timeout) {
+ this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
return (RequestBuilder) this;
}
@SuppressWarnings("unchecked")
- public final RequestBuilder setIndex(String index) {
- request.index(index);
+ public RequestBuilder setIndex(String index) {
+ this.index = index;
return (RequestBuilder) this;
}
+ public String getIndex() {
+ return index;
+ }
+
/**
* Sets the number of shard copies that must be active before proceeding with the write.
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
@SuppressWarnings("unchecked")
public RequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
- request.waitForActiveShards(waitForActiveShards);
+ this.waitForActiveShards = waitForActiveShards;
return (RequestBuilder) this;
}
@@ -66,4 +75,17 @@ public RequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShard
public RequestBuilder setWaitForActiveShards(final int waitForActiveShards) {
return setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}
+
+ protected void apply(Request request) {
+ if (index != null) {
+ request.index(index);
+ }
+ if (timeout != null) {
+ request.timeout(timeout);
+ }
+ if (waitForActiveShards != null) {
+ request.waitForActiveShards(waitForActiveShards);
+ }
+ }
+
}
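Concrete subclasses are expected to build their request lazily and reuse apply(...) for the shared fields, mirroring what IndexRequestBuilder does above. A hypothetical sketch (MyReplicationRequest and its builder are invented for illustration, not part of this change):

    @Override
    public MyReplicationRequest request() {
        MyReplicationRequest request = new MyReplicationRequest();
        super.apply(request);   // copies index, timeout and waitForActiveShards when they were set
        // copy any subclass-specific state here
        return request;
    }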
diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java
index 931f072e1e45e..64efcda2f14db 100644
--- a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java
@@ -8,7 +8,7 @@
package org.elasticsearch.action.support.single.instance;
-import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionRequestLazyBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.client.internal.ElasticsearchClient;
@@ -17,26 +17,32 @@
public abstract class InstanceShardOperationRequestBuilder<
Request extends InstanceShardOperationRequest<Request>,
Response extends ActionResponse,
- RequestBuilder extends InstanceShardOperationRequestBuilder<Request, Response, RequestBuilder>> extends ActionRequestBuilder<
+ RequestBuilder extends InstanceShardOperationRequestBuilder<Request, Response, RequestBuilder>> extends ActionRequestLazyBuilder<
Request,
Response> {
+ private String index;
+ private TimeValue timeout;
- protected InstanceShardOperationRequestBuilder(ElasticsearchClient client, ActionType<Response> action, Request request) {
- super(client, action, request);
+ protected InstanceShardOperationRequestBuilder(ElasticsearchClient client, ActionType<Response> action) {
+ super(client, action);
}
@SuppressWarnings("unchecked")
- public final RequestBuilder setIndex(String index) {
- request.index(index);
+ public RequestBuilder setIndex(String index) {
+ this.index = index;
return (RequestBuilder) this;
}
+ protected String getIndex() {
+ return index;
+ }
+
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
@SuppressWarnings("unchecked")
- public final RequestBuilder setTimeout(TimeValue timeout) {
- request.timeout(timeout);
+ public RequestBuilder setTimeout(TimeValue timeout) {
+ this.timeout = timeout;
return (RequestBuilder) this;
}
@@ -44,8 +50,17 @@ public final RequestBuilder setTimeout(TimeValue timeout) {
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
@SuppressWarnings("unchecked")
- public final RequestBuilder setTimeout(String timeout) {
- request.timeout(timeout);
+ public RequestBuilder setTimeout(String timeout) {
+ this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
return (RequestBuilder) this;
}
+
+ protected void apply(Request request) {
+ if (index != null) {
+ request.index(index);
+ }
+ if (timeout != null) {
+ request.timeout(timeout);
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
index d7b1ea46b77b0..36b6cc6aa9964 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
@@ -704,6 +704,14 @@ private IndexRequest safeDoc() {
return doc;
}
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequest upsert(BytesReference source, XContentType contentType) {
+ safeUpsertRequest().source(source, contentType);
+ return this;
+ }
+
/**
* Sets the index request to be used if the document does not exist. Otherwise, a
* {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown.
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
index 88bed844558f2..c1ee0f7b8af37 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
@@ -8,37 +8,77 @@
package org.elasticsearch.action.update;
+import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
+import org.elasticsearch.client.internal.Requests;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.script.Script;
import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentType;
+import java.io.IOException;
import java.util.Map;
public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<UpdateRequest, UpdateResponse, UpdateRequestBuilder>
implements
WriteRequestBuilder<UpdateRequestBuilder> {
+ private String id;
+ private String routing;
+ private Script script;
+
+ private String fetchSourceInclude;
+ private String fetchSourceExclude;
+ private String[] fetchSourceIncludeArray;
+ private String[] fetchSourceExcludeArray;
+ private Boolean fetchSource;
+
+ private Integer retryOnConflict;
+ private Long version;
+ private VersionType versionType;
+ private Long ifSeqNo;
+ private Long ifPrimaryTerm;
+ private ActiveShardCount waitForActiveShards;
+
+ private IndexRequest doc;
+ private BytesReference docSourceBytesReference;
+ private XContentType docSourceXContentType;
+
+ private IndexRequest upsert;
+ private BytesReference upsertSourceBytesReference;
+ private XContentType upsertSourceXContentType;
+
+ private Boolean docAsUpsert;
+ private Boolean detectNoop;
+ private Boolean scriptedUpsert;
+ private Boolean requireAlias;
+ private WriteRequest.RefreshPolicy refreshPolicy;
+
public UpdateRequestBuilder(ElasticsearchClient client) {
- super(client, TransportUpdateAction.TYPE, new UpdateRequest());
+ this(client, null, null);
}
public UpdateRequestBuilder(ElasticsearchClient client, String index, String id) {
- super(client, TransportUpdateAction.TYPE, new UpdateRequest(index, id));
+ super(client, TransportUpdateAction.TYPE);
+ setIndex(index);
+ setId(id);
}
/**
* Sets the id of the indexed document.
*/
public UpdateRequestBuilder setId(String id) {
- request.id(id);
+ this.id = id;
return this;
}
@@ -47,7 +87,7 @@ public UpdateRequestBuilder setId(String id) {
* and not the id.
*/
public UpdateRequestBuilder setRouting(String routing) {
- request.routing(routing);
+ this.routing = routing;
return this;
}
@@ -60,7 +100,7 @@ public UpdateRequestBuilder setRouting(String routing) {
*
*/
public UpdateRequestBuilder setScript(Script script) {
- request.script(script);
+ this.script = script;
return this;
}
@@ -77,7 +117,8 @@ public UpdateRequestBuilder setScript(Script script) {
* the returned _source
*/
public UpdateRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
- request.fetchSource(include, exclude);
+ this.fetchSourceInclude = include;
+ this.fetchSourceExclude = exclude;
return this;
}
@@ -94,7 +135,8 @@ public UpdateRequestBuilder setFetchSource(@Nullable String include, @Nullable S
* filter the returned _source
*/
public UpdateRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
- request.fetchSource(includes, excludes);
+ this.fetchSourceIncludeArray = includes;
+ this.fetchSourceExcludeArray = excludes;
return this;
}
@@ -102,7 +144,7 @@ public UpdateRequestBuilder setFetchSource(@Nullable String[] includes, @Nullabl
* Indicates whether the response should contain the updated _source.
*/
public UpdateRequestBuilder setFetchSource(boolean fetchSource) {
- request.fetchSource(fetchSource);
+ this.fetchSource = fetchSource;
return this;
}
@@ -111,7 +153,7 @@ public UpdateRequestBuilder setFetchSource(boolean fetchSource) {
* getting it and updating it. Defaults to 0.
*/
public UpdateRequestBuilder setRetryOnConflict(int retryOnConflict) {
- request.retryOnConflict(retryOnConflict);
+ this.retryOnConflict = retryOnConflict;
return this;
}
@@ -120,7 +162,7 @@ public UpdateRequestBuilder setRetryOnConflict(int retryOnConflict) {
* version exists and no changes happened on the doc since then.
*/
public UpdateRequestBuilder setVersion(long version) {
- request.version(version);
+ this.version = version;
return this;
}
@@ -128,7 +170,7 @@ public UpdateRequestBuilder setVersion(long version) {
* Sets the versioning type. Defaults to {@link org.elasticsearch.index.VersionType#INTERNAL}.
*/
public UpdateRequestBuilder setVersionType(VersionType versionType) {
- request.versionType(versionType);
+ this.versionType = versionType;
return this;
}
@@ -140,7 +182,7 @@ public UpdateRequestBuilder setVersionType(VersionType versionType) {
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public UpdateRequestBuilder setIfSeqNo(long seqNo) {
- request.setIfSeqNo(seqNo);
+ this.ifSeqNo = seqNo;
return this;
}
@@ -152,7 +194,7 @@ public UpdateRequestBuilder setIfSeqNo(long seqNo) {
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public UpdateRequestBuilder setIfPrimaryTerm(long term) {
- request.setIfPrimaryTerm(term);
+ this.ifPrimaryTerm = term;
return this;
}
@@ -161,7 +203,7 @@ public UpdateRequestBuilder setIfPrimaryTerm(long term) {
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
public UpdateRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
- request.waitForActiveShards(waitForActiveShards);
+ this.waitForActiveShards = waitForActiveShards;
return this;
}
@@ -178,7 +220,7 @@ public UpdateRequestBuilder setWaitForActiveShards(final int waitForActiveShards
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(IndexRequest indexRequest) {
- request.doc(indexRequest);
+ this.doc = indexRequest;
return this;
}
@@ -186,7 +228,8 @@ public UpdateRequestBuilder setDoc(IndexRequest indexRequest) {
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(XContentBuilder source) {
- request.doc(source);
+ this.docSourceBytesReference = BytesReference.bytes(source);
+ this.docSourceXContentType = source.contentType();
return this;
}
@@ -194,23 +237,28 @@ public UpdateRequestBuilder setDoc(XContentBuilder source) {
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(Map<String, Object> source) {
- request.doc(source);
- return this;
+ return setDoc(source, Requests.INDEX_CONTENT_TYPE);
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(Map<String, Object> source, XContentType contentType) {
- request.doc(source, contentType);
- return this;
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(source);
+ return setDoc(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(String source, XContentType xContentType) {
- request.doc(source, xContentType);
+ this.docSourceBytesReference = new BytesArray(source);
+ this.docSourceXContentType = xContentType;
return this;
}
@@ -218,15 +266,15 @@ public UpdateRequestBuilder setDoc(String source, XContentType xContentType) {
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(byte[] source, XContentType xContentType) {
- request.doc(source, xContentType);
- return this;
+ return setDoc(source, 0, source.length, xContentType);
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(byte[] source, int offset, int length, XContentType xContentType) {
- request.doc(source, offset, length, xContentType);
+ this.docSourceBytesReference = new BytesArray(source, offset, length);
+ this.docSourceXContentType = xContentType;
return this;
}
@@ -235,8 +283,7 @@ public UpdateRequestBuilder setDoc(byte[] source, int offset, int length, XConte
* is a field and value pairs.
*/
public UpdateRequestBuilder setDoc(Object... source) {
- request.doc(source);
- return this;
+ return setDoc(Requests.INDEX_CONTENT_TYPE, source);
}
/**
@@ -244,8 +291,7 @@ public UpdateRequestBuilder setDoc(Object... source) {
* is a field and value pairs.
*/
public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source) {
- request.doc(xContentType, source);
- return this;
+ return setDoc(IndexRequest.getXContentBuilder(xContentType, source));
}
/**
@@ -253,7 +299,7 @@ public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source)
* {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown.
*/
public UpdateRequestBuilder setUpsert(IndexRequest indexRequest) {
- request.upsert(indexRequest);
+ this.upsert = indexRequest;
return this;
}
@@ -261,7 +307,8 @@ public UpdateRequestBuilder setUpsert(IndexRequest indexRequest) {
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(XContentBuilder source) {
- request.upsert(source);
+ this.upsertSourceBytesReference = BytesReference.bytes(source);
+ this.upsertSourceXContentType = source.contentType();
return this;
}
@@ -269,23 +316,28 @@ public UpdateRequestBuilder setUpsert(XContentBuilder source) {
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(Map<String, Object> source) {
- request.upsert(source);
- return this;
+ return setUpsert(source, Requests.INDEX_CONTENT_TYPE);
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(Map<String, Object> source, XContentType contentType) {
- request.upsert(source, contentType);
- return this;
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(source);
+ return setUpsert(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(String source, XContentType xContentType) {
- request.upsert(source, xContentType);
+ this.upsertSourceBytesReference = new BytesArray(source);
+ this.upsertSourceXContentType = xContentType;
return this;
}
@@ -293,15 +345,15 @@ public UpdateRequestBuilder setUpsert(String source, XContentType xContentType)
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(byte[] source, XContentType xContentType) {
- request.upsert(source, xContentType);
- return this;
+ return setUpsert(source, 0, source.length, xContentType);
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length, XContentType xContentType) {
- request.upsert(source, offset, length, xContentType);
+ this.upsertSourceBytesReference = new BytesArray(source, offset, length);
+ this.upsertSourceXContentType = xContentType;
return this;
}
@@ -310,8 +362,7 @@ public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length, XCo
* includes field and value pairs.
*/
public UpdateRequestBuilder setUpsert(Object... source) {
- request.upsert(source);
- return this;
+ return setUpsert(Requests.INDEX_CONTENT_TYPE, source);
}
/**
@@ -319,15 +370,14 @@ public UpdateRequestBuilder setUpsert(Object... source) {
* includes field and value pairs.
*/
public UpdateRequestBuilder setUpsert(XContentType xContentType, Object... source) {
- request.upsert(xContentType, source);
- return this;
+ return setUpsert(IndexRequest.getXContentBuilder(xContentType, source));
}
/**
* Sets whether the specified doc parameter should be used as upsert document.
*/
public UpdateRequestBuilder setDocAsUpsert(boolean shouldUpsertDoc) {
- request.docAsUpsert(shouldUpsertDoc);
+ this.docAsUpsert = shouldUpsertDoc;
return this;
}
@@ -336,7 +386,7 @@ public UpdateRequestBuilder setDocAsUpsert(boolean shouldUpsertDoc) {
* Defaults to true.
*/
public UpdateRequestBuilder setDetectNoop(boolean detectNoop) {
- request.detectNoop(detectNoop);
+ this.detectNoop = detectNoop;
return this;
}
@@ -344,7 +394,7 @@ public UpdateRequestBuilder setDetectNoop(boolean detectNoop) {
* Sets whether the script should be run in the case of an insert
*/
public UpdateRequestBuilder setScriptedUpsert(boolean scriptedUpsert) {
- request.scriptedUpsert(scriptedUpsert);
+ this.scriptedUpsert = scriptedUpsert;
return this;
}
@@ -352,7 +402,127 @@ public UpdateRequestBuilder setScriptedUpsert(boolean scriptedUpsert) {
* Sets the require_alias flag
*/
public UpdateRequestBuilder setRequireAlias(boolean requireAlias) {
- request.setRequireAlias(requireAlias);
- return this;
+ this.requireAlias = requireAlias;
+ return this;
+ }
+
+ @Override
+ public UpdateRequestBuilder setRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) {
+ this.refreshPolicy = refreshPolicy;
+ return this;
+ }
+
+ @Override
+ public UpdateRequestBuilder setRefreshPolicy(String refreshPolicy) {
+ this.refreshPolicy = WriteRequest.RefreshPolicy.parse(refreshPolicy);
+ return this;
+ }
+
+ @Override
+ public UpdateRequest request() {
+ validate();
+ UpdateRequest request = new UpdateRequest();
+ super.apply(request);
+ if (id != null) {
+ request.id(id);
+ }
+ if (routing != null) {
+ request.routing(routing);
+ }
+ if (script != null) {
+ request.script(script);
+ }
+ if (fetchSourceInclude != null || fetchSourceExclude != null) {
+ request.fetchSource(fetchSourceInclude, fetchSourceExclude);
+ }
+ if (fetchSourceIncludeArray != null || fetchSourceExcludeArray != null) {
+ request.fetchSource(fetchSourceIncludeArray, fetchSourceExcludeArray);
+ }
+ if (fetchSource != null) {
+ request.fetchSource(fetchSource);
+ }
+ if (retryOnConflict != null) {
+ request.retryOnConflict(retryOnConflict);
+ }
+ if (version != null) {
+ request.version(version);
+ }
+ if (versionType != null) {
+ request.versionType(versionType);
+ }
+ if (ifSeqNo != null) {
+ request.setIfSeqNo(ifSeqNo);
+ }
+ if (ifPrimaryTerm != null) {
+ request.setIfPrimaryTerm(ifPrimaryTerm);
+ }
+ if (waitForActiveShards != null) {
+ request.waitForActiveShards(waitForActiveShards);
+ }
+ if (doc != null) {
+ request.doc(doc);
+ }
+ if (docSourceBytesReference != null && docSourceXContentType != null) {
+ request.doc(docSourceBytesReference, docSourceXContentType);
+ }
+ if (upsert != null) {
+ request.upsert(upsert);
+ }
+ if (upsertSourceBytesReference != null && upsertSourceXContentType != null) {
+ request.upsert(upsertSourceBytesReference, upsertSourceXContentType);
+ }
+ if (docAsUpsert != null) {
+ request.docAsUpsert(docAsUpsert);
+ }
+ if (detectNoop != null) {
+ request.detectNoop(detectNoop);
+ }
+ if (scriptedUpsert != null) {
+ request.scriptedUpsert(scriptedUpsert);
+ }
+ if (requireAlias != null) {
+ request.setRequireAlias(requireAlias);
+ }
+ if (refreshPolicy != null) {
+ request.setRefreshPolicy(refreshPolicy);
+ }
+ return request;
+ }
+
+ protected void validate() throws IllegalStateException {
+ boolean fetchIncludeExcludeNotNull = fetchSourceInclude != null || fetchSourceExclude != null;
+ boolean fetchIncludeExcludeArrayNotNull = fetchSourceIncludeArray != null || fetchSourceExcludeArray != null;
+ boolean fetchSourceNotNull = fetchSource != null;
+ if ((fetchIncludeExcludeNotNull && fetchIncludeExcludeArrayNotNull)
+ || (fetchIncludeExcludeNotNull && fetchSourceNotNull)
+ || (fetchIncludeExcludeArrayNotNull && fetchSourceNotNull)) {
+ throw new IllegalStateException("Only one fetchSource() method may be called");
+ }
+ int docSourceFieldsSet = countDocSourceFieldsSet();
+ if (docSourceFieldsSet > 1) {
+ throw new IllegalStateException("Only one setDoc() method may be called, but " + docSourceFieldsSet + " have been");
+ }
+ int upsertSourceFieldsSet = countUpsertSourceFieldsSet();
+ if (upsertSourceFieldsSet > 1) {
+ throw new IllegalStateException("Only one setUpsert() method may be called, but " + upsertSourceFieldsSet + " have been");
+ }
+ }
+
+ private int countDocSourceFieldsSet() {
+ return countNonNullObjects(doc, docSourceBytesReference);
+ }
+
+ private int countUpsertSourceFieldsSet() {
+ return countNonNullObjects(upsert, upsertSourceBytesReference);
+ }
+
+ private int countNonNullObjects(Object... objects) {
+ int sum = 0;
+ for (Object object : objects) {
+ if (object != null) {
+ sum++;
+ }
+ }
+ return sum;
}
}
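The new validate() step rejects mixing different setDoc()/setUpsert() forms when the request is materialized. A small sketch of the failure mode (client, index, id and source values are illustrative):

    UpdateRequestBuilder builder = new UpdateRequestBuilder(client, "my-index", "1")
        .setDoc(new IndexRequest().source(Map.of("field", "value")))   // sets the doc IndexRequest
        .setDoc("{\"field\":\"other\"}", XContentType.JSON);           // also sets the doc source bytes
    builder.request();   // throws IllegalStateException: "Only one setDoc() method may be called, but 2 have been"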
diff --git a/server/src/main/java/org/elasticsearch/client/internal/Client.java b/server/src/main/java/org/elasticsearch/client/internal/Client.java
index 668168764a4d0..c6a2b0fee767f 100644
--- a/server/src/main/java/org/elasticsearch/client/internal/Client.java
+++ b/server/src/main/java/org/elasticsearch/client/internal/Client.java
@@ -78,7 +78,7 @@ public interface Client extends ElasticsearchClient {
case "node", "transport" -> s;
default -> throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]");
};
- }, Property.NodeScope);
+ }, Property.NodeScope, Property.Deprecated);
/**
* The admin client that can be used to perform administrative operations.
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
index 0f83e6f2d8e19..f817298e4e328 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
@@ -163,8 +163,8 @@ public Iterator extends ToXContent> toXContentChunked(ToXContent.Params params
return builder;
}),
singleChunk(
- (builder, p) -> builder.endObject(), // end "nodes"
- (builder, p) -> builder.startObject("shard_sizes")
+ (builder, p) -> builder.endObject() // end "nodes"
+ .startObject("shard_sizes")
),
Iterators.map(
@@ -172,8 +172,8 @@ public Iterator extends ToXContent> toXContentChunked(ToXContent.Params params
c -> (builder, p) -> builder.humanReadableField(c.getKey() + "_bytes", c.getKey(), ByteSizeValue.ofBytes(c.getValue()))
),
singleChunk(
- (builder, p) -> builder.endObject(), // end "shard_sizes"
- (builder, p) -> builder.startObject("shard_data_set_sizes")
+ (builder, p) -> builder.endObject() // end "shard_sizes"
+ .startObject("shard_data_set_sizes")
),
Iterators.map(
shardDataSetSizes.entrySet().iterator(),
@@ -184,13 +184,13 @@ public Iterator extends ToXContent> toXContentChunked(ToXContent.Params params
)
),
singleChunk(
- (builder, p) -> builder.endObject(), // end "shard_data_set_sizes"
- (builder, p) -> builder.startObject("shard_paths")
+ (builder, p) -> builder.endObject() // end "shard_data_set_sizes"
+ .startObject("shard_paths")
),
Iterators.map(dataPath.entrySet().iterator(), c -> (builder, p) -> builder.field(c.getKey().toString(), c.getValue())),
singleChunk(
- (builder, p) -> builder.endObject(), // end "shard_paths"
- (builder, p) -> builder.startArray("reserved_sizes")
+ (builder, p) -> builder.endObject() // end "shard_paths"
+ .startArray("reserved_sizes")
),
Iterators.map(reservedSpace.entrySet().iterator(), c -> (builder, p) -> {
builder.startObject();
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java
index 15a196601b7b7..511f3f528fb65 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java
@@ -255,6 +255,15 @@ public Index getWriteIndex() {
return writeIndex;
}
+ @Override
+ public Index getWriteIndex(IndexRequest request, Metadata metadata) {
+ if (dataStreamAlias == false) {
+ return getWriteIndex();
+ }
+
+ return metadata.getIndicesLookup().get(getWriteIndex().getName()).getParentDataStream().getWriteIndex(request, metadata);
+ }
+
@Override
public DataStream getParentDataStream() {
// aliases may not be part of a data stream
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
index a95c3e905d5f4..a72c0c3d51771 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
@@ -1292,8 +1292,7 @@ public String toString() {
INDEX_DOWNSAMPLE_INTERVAL_KEY,
"",
Property.IndexScope,
- Property.InternalIndex,
- Property.PrivateIndex
+ Property.InternalIndex
);
// LIFECYCLE_NAME is here an as optimization, see LifecycleSettings.LIFECYCLE_NAME and
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
index 90bcc8a7270cb..cd4a929052a62 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
@@ -395,7 +395,7 @@ public void writeToThin(StreamOutput out) throws IOException {
role.writeTo(out);
} else if (role != Role.DEFAULT) {
throw new IllegalStateException(
- Strings.format("cannot send role [%s] with transport version [%s]", role, out.getTransportVersion())
+ Strings.format("cannot send role [%s] to node with version [%s]", role, out.getTransportVersion().toReleaseVersion())
);
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
index d4aa3772c44d5..0c25d30593abd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
@@ -124,16 +124,16 @@ private static Decision isVersionCompatible(
Decision.YES,
NAME,
"max supported index version [%s] is the same or newer than snapshot version [%s]",
- target.node().getMaxIndexVersion(),
- recoverySource.version()
+ target.node().getMaxIndexVersion().toReleaseVersion(),
+ recoverySource.version().toReleaseVersion()
);
} else {
return allocation.decision(
Decision.NO,
NAME,
"max supported index version [%s] is older than the snapshot version [%s]",
- target.node().getMaxIndexVersion(),
- recoverySource.version()
+ target.node().getMaxIndexVersion().toReleaseVersion(),
+ recoverySource.version().toReleaseVersion()
);
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java
index ebc725b22c9e1..a92cf1ce2e42c 100644
--- a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java
+++ b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java
@@ -69,10 +69,10 @@ public static void ensureVersionsCompatibility(CompatibilityVersions candidate,
if (candidate.transportVersion().before(minimumClusterVersions.transportVersion())) {
throw new IllegalStateException(
- "node with transport version ["
- + candidate.transportVersion()
- + "] may not join a cluster with minimum transport version ["
- + minimumClusterVersions.transportVersion()
+ "node with version ["
+ + candidate.transportVersion().toReleaseVersion()
+ + "] may not join a cluster with minimum version ["
+ + minimumClusterVersions.transportVersion().toReleaseVersion()
+ "]"
);
}
diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java
index 2fca882724bbd..1e30579292d00 100644
--- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java
@@ -143,6 +143,14 @@ public int read() throws IOException {
@Override
public int read(final byte[] b, final int bOffset, final int len) throws IOException {
+ if (slice.remaining() >= len) {
+ slice.get(b, bOffset, len);
+ return len;
+ }
+ return readFromMultipleSlices(b, bOffset, len);
+ }
+
+ private int readFromMultipleSlices(byte[] b, int bOffset, int len) throws IOException {
final int length = bytesReference.length();
final int offset = offset();
if (offset >= length) {
@@ -186,6 +194,14 @@ public long skip(long n) throws IOException {
if (n <= 0L) {
return 0L;
}
+ if (n <= slice.remaining()) {
+ slice.position(slice.position() + (int) n);
+ return n;
+ }
+ return skipMultiple(n);
+ }
+
+ private int skipMultiple(long n) throws IOException {
assert offset() <= bytesReference.length() : offset() + " vs " + bytesReference.length();
// definitely >= 0 and <= Integer.MAX_VALUE so casting is ok
final int numBytesSkipped = (int) Math.min(n, bytesReference.length() - offset());
diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
index 567f39d968200..e9fe63529e17a 100644
--- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
+++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
@@ -144,7 +144,7 @@ public long ramBytesUsed() {
@Override
public StreamInput streamInput() throws IOException {
assert hasReferences();
- return new BytesReferenceStreamInput(this) {
+ return new BytesReferenceStreamInput(delegate) {
private ReleasableBytesReference retainAndSkip(int len) throws IOException {
if (len == 0) {
return ReleasableBytesReference.empty();
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index c4f0dc58f5ffd..9e271ee6f9bfc 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -364,7 +364,7 @@ public Text readOptionalText() throws IOException {
}
public Text readText() throws IOException {
- // use StringAndBytes so we can cache the string if its ever converted to it
+ // use StringAndBytes so we can cache the string if it's ever converted to it
int length = readInt();
return new Text(readBytesReference(length));
}
@@ -1271,8 +1271,8 @@ protected int readArraySize() throws IOException {
if (arraySize < 0) {
throwNegative(arraySize);
}
- // lets do a sanity check that if we are reading an array size that is bigger that the remaining bytes we can safely
- // throw an exception instead of allocating the array based on the size. A simple corrutpted byte can make a node go OOM
+ // let's do a sanity check that if we are reading an array size that is bigger that the remaining bytes we can safely
+ // throw an exception instead of allocating the array based on the size. A simple corrupted byte can make a node go OOM
// if the size is large and for perf reasons we allocate arrays ahead of time
ensureCanReadBytes(arraySize);
return arraySize;
@@ -1287,7 +1287,7 @@ private static void throwExceedsMaxArraySize(int arraySize) {
}
/**
- * This method throws an {@link EOFException} if the given number of bytes can not be read from the this stream. This method might
+ * This method throws an {@link EOFException} if the given number of bytes can not be read from the stream. This method might
* be a no-op depending on the underlying implementation if the information of the remaining bytes is not present.
*/
protected abstract void ensureCanReadBytes(int length) throws EOFException;
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
index a0b62bdabc08b..b67879510b108 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
@@ -596,10 +596,13 @@ public final void writeMap(final Map<
* @param valueWriter The value writer
*/
public final <K, V> void writeMap(final Map<K, V> map, final Writer<K> keyWriter, final Writer<V> valueWriter) throws IOException {
- writeVInt(map.size());
- for (final Map.Entry<K, V> entry : map.entrySet()) {
- keyWriter.write(this, entry.getKey());
- valueWriter.write(this, entry.getValue());
+ int size = map.size();
+ writeVInt(size);
+ if (size > 0) {
+ for (final Map.Entry<K, V> entry : map.entrySet()) {
+ keyWriter.write(this, entry.getKey());
+ valueWriter.write(this, entry.getValue());
+ }
}
}
@@ -784,8 +787,10 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep
})
);
+ public static final byte GENERIC_LIST_HEADER = (byte) 7;
+
public void writeGenericList(List v, Writer writer) throws IOException {
- writeByte((byte) 7);
+ writeByte(GENERIC_LIST_HEADER);
writeCollection(v, writer);
}
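An illustrative call of the writeMap overload above (the stream out and the map contents are placeholders, not from this change); each key and value is written with the supplied Writer method reference:

    out.writeMap(Map.of("node-1", 3L), StreamOutput::writeString, StreamOutput::writeVLong);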
diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java
index dc73dc77c71af..34d583ed7e732 100644
--- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java
+++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java
@@ -2125,7 +2125,8 @@ static DateFormatter forPattern(String input) {
input,
new DateTimeFormatterBuilder().appendPattern(input).toFormatter(Locale.ROOT).withResolverStyle(ResolverStyle.STRICT)
);
- } catch (IllegalArgumentException e) {
+ } catch (IllegalArgumentException | ClassCastException e) {
+ // ClassCastException catches this bug https://bugs.openjdk.org/browse/JDK-8193877
throw new IllegalArgumentException("Invalid format: [" + input + "]: " + e.getMessage(), e);
}
}
diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java
index 4eaf9b5636623..fbc506f600097 100644
--- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java
+++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java
@@ -115,17 +115,13 @@ public static Iterator map(String name, Map map, Func
}
/**
- * Creates an Iterator of a single ToXContent object that serializes all the given 'contents' ToXContent objects into a single chunk.
+ * Creates an Iterator of a single ToXContent object that serializes the given object as a single chunk. Just wraps {@link
+ * Iterators#single}, but still useful because it avoids any type ambiguity.
*
- * @param contents ToXContent objects supporting toXContent() calls.
- * @return Iterator of a single ToXContent object serializing all the ToXContent "contents".
+ * @param item Item to wrap
+ * @return Singleton iterator for the given item.
*/
- public static Iterator<ToXContent> singleChunk(ToXContent... contents) {
- return Iterators.single((builder, params) -> {
- for (ToXContent content : contents) {
- content.toXContent(builder, params);
- }
- return builder;
- });
+ public static Iterator<ToXContent> singleChunk(ToXContent item) {
+ return Iterators.single(item);
}
}
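Callers that previously passed several fragments to singleChunk now chain them on the builder inside one lambda, as the ClusterInfo hunks above already do, for example:

    singleChunk((builder, p) -> builder.endObject()   // end "nodes"
        .startObject("shard_sizes"));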
diff --git a/server/src/main/java/org/elasticsearch/health/node/DiskHealthInfo.java b/server/src/main/java/org/elasticsearch/health/node/DiskHealthInfo.java
index fdda0a260068c..f1e085482b72a 100644
--- a/server/src/main/java/org/elasticsearch/health/node/DiskHealthInfo.java
+++ b/server/src/main/java/org/elasticsearch/health/node/DiskHealthInfo.java
@@ -20,7 +20,7 @@
* The health status of the disk space of this node along with the cause.
*/
public record DiskHealthInfo(HealthStatus healthStatus, @Nullable Cause cause) implements Writeable {
- DiskHealthInfo(HealthStatus healthStatus) {
+ public DiskHealthInfo(HealthStatus healthStatus) {
this(healthStatus, null);
}
diff --git a/server/src/main/java/org/elasticsearch/health/node/HealthInfo.java b/server/src/main/java/org/elasticsearch/health/node/HealthInfo.java
index 0bb8027f8299d..97087c05e8de8 100644
--- a/server/src/main/java/org/elasticsearch/health/node/HealthInfo.java
+++ b/server/src/main/java/org/elasticsearch/health/node/HealthInfo.java
@@ -23,19 +23,25 @@
* This class wraps all the data returned by the health node.
* @param diskInfoByNode A Map of node id to DiskHealthInfo for that node
* @param dslHealthInfo The data stream lifecycle health information
+ * @param repositoriesInfoByNode A Map of node id to RepositoriesHealthInfo for that node
*/
-public record HealthInfo(Map<String, DiskHealthInfo> diskInfoByNode, @Nullable DataStreamLifecycleHealthInfo dslHealthInfo)
- implements
- Writeable {
+public record HealthInfo(
+ Map<String, DiskHealthInfo> diskInfoByNode,
+ @Nullable DataStreamLifecycleHealthInfo dslHealthInfo,
+ Map<String, RepositoriesHealthInfo> repositoriesInfoByNode
+) implements Writeable {
- public static final HealthInfo EMPTY_HEALTH_INFO = new HealthInfo(Map.of(), NO_DSL_ERRORS);
+ public static final HealthInfo EMPTY_HEALTH_INFO = new HealthInfo(Map.of(), NO_DSL_ERRORS, Map.of());
public HealthInfo(StreamInput input) throws IOException {
this(
input.readMap(DiskHealthInfo::new),
input.getTransportVersion().onOrAfter(TransportVersions.HEALTH_INFO_ENRICHED_WITH_DSL_STATUS)
? input.readOptionalWriteable(DataStreamLifecycleHealthInfo::new)
- : null
+ : null,
+ input.getTransportVersion().onOrAfter(TransportVersions.HEALTH_INFO_ENRICHED_WITH_REPOS)
+ ? input.readMap(RepositoriesHealthInfo::new)
+ : Map.of()
);
}
@@ -45,5 +51,8 @@ public void writeTo(StreamOutput output) throws IOException {
if (output.getTransportVersion().onOrAfter(TransportVersions.HEALTH_INFO_ENRICHED_WITH_DSL_STATUS)) {
output.writeOptionalWriteable(dslHealthInfo);
}
+ if (output.getTransportVersion().onOrAfter(TransportVersions.HEALTH_INFO_ENRICHED_WITH_REPOS)) {
+ output.writeMap(repositoriesInfoByNode, StreamOutput::writeWriteable);
+ }
}
}
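A test-style round-trip sketch for the version-gated field; the names match the hunks above, while the node id, repository name, and use of `setTransportVersion` on both streams are illustrative assumptions.

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.health.node.HealthInfo;
import org.elasticsearch.health.node.RepositoriesHealthInfo;

import java.io.IOException;
import java.util.List;
import java.util.Map;

class HealthInfoRoundTripSketch {
    static void roundTrip() throws IOException {
        HealthInfo original = new HealthInfo(
            Map.of(),
            null,
            Map.of("node-1", new RepositoriesHealthInfo(List.of("unknown-repo"), List.of()))
        );
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // Both sides must be on a version that knows about the repositories field,
            // otherwise it is dropped/defaulted as in the writeTo/stream constructor above.
            out.setTransportVersion(TransportVersions.HEALTH_INFO_ENRICHED_WITH_REPOS);
            original.writeTo(out);
            StreamInput in = out.bytes().streamInput();
            in.setTransportVersion(TransportVersions.HEALTH_INFO_ENRICHED_WITH_REPOS);
            assert original.equals(new HealthInfo(in));
        }
    }
}
```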
diff --git a/server/src/main/java/org/elasticsearch/health/node/HealthInfoCache.java b/server/src/main/java/org/elasticsearch/health/node/HealthInfoCache.java
index 986b5e13dce6e..58ac3b03dd964 100644
--- a/server/src/main/java/org/elasticsearch/health/node/HealthInfoCache.java
+++ b/server/src/main/java/org/elasticsearch/health/node/HealthInfoCache.java
@@ -29,6 +29,7 @@ public class HealthInfoCache implements ClusterStateListener {
private volatile ConcurrentHashMap<String, DiskHealthInfo> diskInfoByNode = new ConcurrentHashMap<>();
@Nullable
private volatile DataStreamLifecycleHealthInfo dslHealthInfo = null;
+ private volatile ConcurrentHashMap<String, RepositoriesHealthInfo> repositoriesInfoByNode = new ConcurrentHashMap<>();
private HealthInfoCache() {}
@@ -41,7 +42,8 @@ public static HealthInfoCache create(ClusterService clusterService) {
public void updateNodeHealth(
String nodeId,
@Nullable DiskHealthInfo diskHealthInfo,
- @Nullable DataStreamLifecycleHealthInfo latestDslHealthInfo
+ @Nullable DataStreamLifecycleHealthInfo latestDslHealthInfo,
+ @Nullable RepositoriesHealthInfo repositoriesHealthInfo
) {
if (diskHealthInfo != null) {
diskInfoByNode.put(nodeId, diskHealthInfo);
@@ -49,6 +51,9 @@ public void updateNodeHealth(
if (latestDslHealthInfo != null) {
dslHealthInfo = latestDslHealthInfo;
}
+ if (repositoriesHealthInfo != null) {
+ repositoriesInfoByNode.put(nodeId, repositoriesHealthInfo);
+ }
}
@Override
@@ -59,16 +64,18 @@ public void clusterChanged(ClusterChangedEvent event) {
if (event.nodesRemoved()) {
for (DiscoveryNode removedNode : event.nodesDelta().removedNodes()) {
diskInfoByNode.remove(removedNode.getId());
+ repositoriesInfoByNode.remove(removedNode.getId());
}
}
// Resetting the cache is not synchronized for efficiency and simplicity.
// Processing a delayed update after the cache has been emptied because
// the node is not the health node anymore has small impact since it will
// be reset in the next round again.
- } else if (diskInfoByNode.isEmpty() == false) {
+ } else if (diskInfoByNode.isEmpty() == false || dslHealthInfo != null || repositoriesInfoByNode.isEmpty() == false) {
logger.debug("Node [{}][{}] is no longer the health node, emptying the cache.", localNode.getName(), localNode.getId());
diskInfoByNode = new ConcurrentHashMap<>();
dslHealthInfo = null;
+ repositoriesInfoByNode = new ConcurrentHashMap<>();
}
}
@@ -78,6 +85,6 @@ public void clusterChanged(ClusterChangedEvent event) {
*/
public HealthInfo getHealthInfo() {
// A shallow copy is enough because the inner data is immutable.
- return new HealthInfo(Map.copyOf(diskInfoByNode), dslHealthInfo);
+ return new HealthInfo(Map.copyOf(diskInfoByNode), dslHealthInfo, Map.copyOf(repositoriesInfoByNode));
}
}
diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
index 94cd518051199..d5d336b88b8ad 100644
--- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
+++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
@@ -11,51 +11,40 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
-import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.cluster.DiskUsage;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.node.DiscoveryNodeRole;
-import org.elasticsearch.cluster.routing.RoutingNode;
-import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.RunOnce;
-import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.HealthFeatures;
-import org.elasticsearch.health.HealthStatus;
import org.elasticsearch.health.metadata.HealthMetadata;
import org.elasticsearch.health.node.action.HealthNodeNotDiscoveredException;
import org.elasticsearch.health.node.selection.HealthNode;
import org.elasticsearch.health.node.selection.HealthNodeTaskExecutor;
-import org.elasticsearch.node.NodeService;
+import org.elasticsearch.health.node.tracker.HealthTracker;
import org.elasticsearch.threadpool.Scheduler;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.NodeNotConnectedException;
+import java.util.List;
import java.util.Objects;
-import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.core.Strings.format;
/**
- * This class monitors the health of the node regarding the load on several resources.
- * Currently, it only checks for available disk space. Furthermore, it informs the health
- * node about the local health upon change or when a new node is detected or when the
- * master node changed.
+ * This class monitors the local health of the node, such as the load and any errors that can be specific to a node
+ * (as opposed to errors that are cluster-wide). It informs the health node about the local health upon change, or
+ * when a new node is detected, or when the master node changed.
*/
public class LocalHealthMonitor implements ClusterStateListener {
@@ -71,7 +60,6 @@ public class LocalHealthMonitor implements ClusterStateListener {
private final ClusterService clusterService;
private final ThreadPool threadPool;
- private final DiskCheck diskCheck;
private final Client client;
private final FeatureService featureService;
@@ -81,10 +69,10 @@ public class LocalHealthMonitor implements ClusterStateListener {
// Signals that all the prerequisites have been fulfilled and the monitoring can be started.
private volatile boolean prerequisitesFulfilled;
- // Keeps the latest health state that was successfully reported to the current health node.
- private final AtomicReference<DiskHealthInfo> lastReportedDiskHealthInfo = new AtomicReference<>();
+ // List of health trackers to be executed in each monitoring cycle.
+ private final List<HealthTracker<?>> healthTrackers;
// Keeps the last seen health node. We use this variable to ensure that there wasn't a health node
- // change between the time we send an update until the time we update the lastReportedDiskHealthInfo.
+ // change between the time we send an update until the time we record the last health state that was successfully reported.
private final AtomicReference<String> lastSeenHealthNode = new AtomicReference<>();
// Using a volatile reference to ensure that there is a single instance of monitoring running at all times.
// No need for extra synchronization because all the writes are executed on the cluster applier thread.
@@ -93,35 +81,35 @@ public class LocalHealthMonitor implements ClusterStateListener {
private LocalHealthMonitor(
Settings settings,
ClusterService clusterService,
- NodeService nodeService,
ThreadPool threadPool,
Client client,
- FeatureService featureService
+ FeatureService featureService,
+ List<HealthTracker<?>> healthTrackers
) {
this.threadPool = threadPool;
this.monitorInterval = POLL_INTERVAL_SETTING.get(settings);
this.enabled = HealthNodeTaskExecutor.ENABLED_SETTING.get(settings);
this.clusterService = clusterService;
this.client = client;
- this.diskCheck = new DiskCheck(nodeService);
this.featureService = featureService;
+ this.healthTrackers = healthTrackers;
}
public static LocalHealthMonitor create(
Settings settings,
ClusterService clusterService,
- NodeService nodeService,
ThreadPool threadPool,
Client client,
- FeatureService featureService
+ FeatureService featureService,
+ List<HealthTracker<?>> healthTrackers
) {
LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(
settings,
clusterService,
- nodeService,
threadPool,
client,
- featureService
+ featureService,
+ healthTrackers
);
localHealthMonitor.registerListeners();
return localHealthMonitor;
@@ -164,15 +152,7 @@ private void stopMonitoring() {
private void startMonitoringIfNecessary() {
if (prerequisitesFulfilled && enabled) {
if (isMonitorRunning() == false) {
- monitoring = Monitoring.start(
- monitorInterval,
- threadPool,
- lastReportedDiskHealthInfo,
- lastSeenHealthNode,
- diskCheck,
- clusterService,
- client
- );
+ monitoring = Monitoring.start(monitorInterval, threadPool, lastSeenHealthNode, healthTrackers, clusterService, client);
logger.debug("Local health monitoring started {}", monitoring);
} else {
logger.trace("Local health monitoring already started {}, skipping", monitoring);
@@ -195,7 +175,8 @@ public void clusterChanged(ClusterChangedEvent event) {
// On health node or on master node changes, the health node might be reset so the reported
// health info gets reset to null, to ensure it will be resent.
lastSeenHealthNode.set(currentHealthNode == null ? null : currentHealthNode.getId());
- lastReportedDiskHealthInfo.set(null);
+ // Reset the reference of each HealthTracker.
+ healthTrackers.forEach(HealthTracker::reset);
if (logger.isDebugEnabled()) {
String reason;
if (healthNodeChanged && masterNodeChanged) {
@@ -242,11 +223,6 @@ private boolean hasHealthNodeChanged(DiscoveryNode currentHealthNode, ClusterCha
|| Objects.equals(previousHealthNode, currentHealthNode) == false;
}
- @Nullable
- DiskHealthInfo getLastReportedDiskHealthInfo() {
- return lastReportedDiskHealthInfo.get();
- }
-
/**
* This class is responsible for running the health monitoring. It evaluates and checks the health info of this node
* in the configured intervals. The first run happens upon initialization. If there is an exception, it will log it
@@ -258,11 +234,10 @@ static class Monitoring implements Runnable, Scheduler.Cancellable {
private final Executor executor;
private final Scheduler scheduler;
private final ClusterService clusterService;
- private final DiskCheck diskCheck;
private final Client client;
- private final AtomicReference<DiskHealthInfo> lastReportedDiskHealthInfo;
private final AtomicReference<String> lastSeenHealthNode;
+ private final List<HealthTracker<?>> healthTrackers;
private volatile boolean cancelled = false;
private volatile Scheduler.ScheduledCancellable scheduledRun;
@@ -271,19 +246,17 @@ private Monitoring(
TimeValue interval,
Scheduler scheduler,
Executor executor,
- AtomicReference<DiskHealthInfo> lastReportedDiskHealthInfo,
AtomicReference<String> lastSeenHealthNode,
- DiskCheck diskCheck,
+ List<HealthTracker<?>> healthTrackers,
ClusterService clusterService,
Client client
) {
this.interval = interval;
this.executor = executor;
this.scheduler = scheduler;
- this.lastReportedDiskHealthInfo = lastReportedDiskHealthInfo;
this.lastSeenHealthNode = lastSeenHealthNode;
this.clusterService = clusterService;
- this.diskCheck = diskCheck;
+ this.healthTrackers = healthTrackers;
this.client = client;
}
@@ -293,9 +266,8 @@ private Monitoring(
static Monitoring start(
TimeValue interval,
ThreadPool threadPool,
- AtomicReference<DiskHealthInfo> lastReportedDiskHealthInfo,
AtomicReference<String> lastSeenHealthNode,
- DiskCheck diskCheck,
+ List<HealthTracker<?>> healthTrackers,
ClusterService clusterService,
Client client
) {
@@ -303,9 +275,8 @@ static Monitoring start(
interval,
threadPool,
threadPool.executor(ThreadPool.Names.MANAGEMENT),
- lastReportedDiskHealthInfo,
lastSeenHealthNode,
- diskCheck,
+ healthTrackers,
clusterService,
client
);
@@ -350,43 +321,31 @@ public void run() {
boolean nextRunScheduled = false;
Runnable scheduleNextRun = new RunOnce(this::scheduleNextRunIfNecessary);
try {
- ClusterState clusterState = clusterService.state();
- HealthMetadata healthMetadata = HealthMetadata.getFromClusterState(clusterState);
- if (healthMetadata != null) {
- DiskHealthInfo previousHealth = this.lastReportedDiskHealthInfo.get();
- DiskHealthInfo currentHealth = diskCheck.getHealth(healthMetadata, clusterState);
- if (currentHealth.equals(previousHealth) == false) {
- String nodeId = clusterService.localNode().getId();
- String healthNodeId = lastSeenHealthNode.get();
- ActionListener<AcknowledgedResponse> listener = ActionListener.wrap(response -> {
- // Update the last reported value only if the health node hasn't changed.
- if (Objects.equals(healthNodeId, lastSeenHealthNode.get())
- && lastReportedDiskHealthInfo.compareAndSet(previousHealth, currentHealth)) {
- logger.debug(
- "Health info [{}] successfully sent, last reported value: {}.",
- currentHealth,
- lastReportedDiskHealthInfo.get()
- );
- }
- }, e -> {
- if (e.getCause() instanceof NodeNotConnectedException
- || e.getCause() instanceof HealthNodeNotDiscoveredException) {
- logger.debug("Failed to connect to the health node [{}], will try again.", e.getCause().getMessage());
- } else {
- logger.debug(
- () -> format("Failed to send health info [%s] to health node, will try again.", currentHealth),
- e
- );
- }
- });
- client.execute(
- UpdateHealthInfoCacheAction.INSTANCE,
- new UpdateHealthInfoCacheAction.Request(nodeId, currentHealth),
- ActionListener.runAfter(listener, scheduleNextRun)
- );
- nextRunScheduled = true;
- }
+ List<HealthTracker.HealthProgress<?>> healthProgresses = getHealthProgresses();
+ if (healthProgresses.isEmpty()) {
+ // Next run will still be scheduled in the `finally` block.
+ return;
}
+ // Create builder and add the current value of each (changed) health tracker to the request.
+ var builder = new UpdateHealthInfoCacheAction.Request.Builder().nodeId(clusterService.localNode().getId());
+ healthProgresses.forEach(changedHealthInfo -> changedHealthInfo.updateRequestBuilder(builder));
+
+ var healthNodeId = lastSeenHealthNode.get();
+ var listener = ActionListener.wrap(response -> {
+ // Don't update the latest health info if the health node has changed while this request was being processed.
+ if (Objects.equals(healthNodeId, lastSeenHealthNode.get()) == false) {
+ return;
+ }
+ healthProgresses.forEach(HealthTracker.HealthProgress::recordProgressIfRelevant);
+ }, e -> {
+ if (e.getCause() instanceof NodeNotConnectedException || e.getCause() instanceof HealthNodeNotDiscoveredException) {
+ logger.debug("Failed to connect to the health node [{}], will try again.", e.getCause().getMessage());
+ } else {
+ logger.debug(() -> format("Failed to send health info to health node, will try again."), e);
+ }
+ });
+ client.execute(UpdateHealthInfoCacheAction.INSTANCE, builder.build(), ActionListener.runAfter(listener, scheduleNextRun));
+ nextRunScheduled = true;
} catch (Exception e) {
logger.warn(() -> format("Failed to run scheduled health monitoring on thread pool [%s]", executor), e);
} finally {
@@ -397,6 +356,24 @@ public void run() {
}
}
+ /**
+ * Retrieve the current health of each tracker and return a list of the ones that have changed.
+ *
+ * @return a list of changed health infos.
+ */
+ private List<HealthTracker.HealthProgress<?>> getHealthProgresses() {
+ var healthMetadata = HealthMetadata.getFromClusterState(clusterService.state());
+ // Don't try to run the health trackers if the HealthMetadata is not available.
+ if (healthMetadata == null) {
+ return List.of();
+ }
+
+ return healthTrackers.stream().<HealthTracker.HealthProgress<?>>map(HealthTracker::trackHealth)
+ // Only return changed values.
+ .filter(HealthTracker.HealthProgress::hasChanged)
+ .toList();
+ }
+
private void scheduleNextRunIfNecessary() {
if (cancelled) {
return;
@@ -413,94 +390,4 @@ public String toString() {
return "Monitoring{interval=" + interval + ", cancelled=" + cancelled + "}";
}
}
-
- /**
- * Determines the disk health of this node by checking if it exceeds the thresholds defined in the health metadata.
- */
- static class DiskCheck {
- private final NodeService nodeService;
-
- DiskCheck(NodeService nodeService) {
- this.nodeService = nodeService;
- }
-
- DiskHealthInfo getHealth(HealthMetadata healthMetadata, ClusterState clusterState) {
- DiscoveryNode node = clusterState.getNodes().getLocalNode();
- HealthMetadata.Disk diskMetadata = healthMetadata.getDiskMetadata();
- DiskUsage usage = getDiskUsage();
- if (usage == null) {
- return new DiskHealthInfo(HealthStatus.UNKNOWN, DiskHealthInfo.Cause.NODE_HAS_NO_DISK_STATS);
- }
-
- ByteSizeValue totalBytes = ByteSizeValue.ofBytes(usage.totalBytes());
-
- if (node.isDedicatedFrozenNode() || isDedicatedSearchNode(node)) {
- long frozenFloodStageThreshold = diskMetadata.getFreeBytesFrozenFloodStageWatermark(totalBytes).getBytes();
- if (usage.freeBytes() < frozenFloodStageThreshold) {
- logger.debug("Flood stage disk watermark [{}] exceeded on {}", frozenFloodStageThreshold, usage);
- return new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.FROZEN_NODE_OVER_FLOOD_STAGE_THRESHOLD);
- }
- return new DiskHealthInfo(HealthStatus.GREEN);
- }
- long floodStageThreshold = diskMetadata.getFreeBytesFloodStageWatermark(totalBytes).getBytes();
- if (usage.freeBytes() < floodStageThreshold) {
- logger.debug("Flood stage disk watermark [{}] exceeded on {}", floodStageThreshold, usage);
- return new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.NODE_OVER_THE_FLOOD_STAGE_THRESHOLD);
- }
-
- long highThreshold = diskMetadata.getFreeBytesHighWatermark(totalBytes).getBytes();
- if (usage.freeBytes() < highThreshold) {
- if (node.canContainData()) {
- // for data nodes only report YELLOW if shards can't move away from the node
- if (DiskCheck.hasRelocatingShards(clusterState, node) == false) {
- logger.debug("High disk watermark [{}] exceeded on {}", highThreshold, usage);
- return new DiskHealthInfo(HealthStatus.YELLOW, DiskHealthInfo.Cause.NODE_OVER_HIGH_THRESHOLD);
- }
- } else {
- // for non-data nodes report YELLOW when the disk high watermark is breached
- logger.debug("High disk watermark [{}] exceeded on {}", highThreshold, usage);
- return new DiskHealthInfo(HealthStatus.YELLOW, DiskHealthInfo.Cause.NODE_OVER_HIGH_THRESHOLD);
- }
- }
- return new DiskHealthInfo(HealthStatus.GREEN);
- }
-
- private static boolean isDedicatedSearchNode(DiscoveryNode node) {
- Set<DiscoveryNodeRole> roles = node.getRoles();
- return roles.contains(DiscoveryNodeRole.SEARCH_ROLE)
- && roles.stream().filter(DiscoveryNodeRole::canContainData).anyMatch(r -> r != DiscoveryNodeRole.SEARCH_ROLE) == false;
- }
-
- private DiskUsage getDiskUsage() {
- NodeStats nodeStats = nodeService.stats(
- CommonStatsFlags.NONE,
- false,
- false,
- false,
- false,
- false,
- true,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false
- );
- return DiskUsage.findLeastAvailablePath(nodeStats);
- }
-
- static boolean hasRelocatingShards(ClusterState clusterState, DiscoveryNode node) {
- RoutingNode routingNode = clusterState.getRoutingNodes().node(node.getId());
- if (routingNode == null) {
- // routing node will be null for non-data nodes
- return false;
- }
- return routingNode.numberOfShardsWithState(ShardRoutingState.RELOCATING) > 0;
- }
- }
}
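A wiring sketch for the new tracker-based factory method; the services passed in are assumed to be available wherever the monitor is created (the actual node wiring is not part of this hunk).

```java
import java.util.List;

import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.node.LocalHealthMonitor;
import org.elasticsearch.health.node.tracker.DiskHealthTracker;
import org.elasticsearch.health.node.tracker.HealthTracker;
import org.elasticsearch.health.node.tracker.RepositoriesHealthTracker;
import org.elasticsearch.node.NodeService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.threadpool.ThreadPool;

class LocalHealthMonitorWiringSketch {
    static LocalHealthMonitor wire(
        Settings settings,
        ClusterService clusterService,
        NodeService nodeService,
        ThreadPool threadPool,
        Client client,
        FeatureService featureService,
        RepositoriesService repositoriesService
    ) {
        // The monitor no longer takes a NodeService directly; node-local checks live in the trackers.
        List<HealthTracker<?>> healthTrackers = List.of(
            new DiskHealthTracker(nodeService, clusterService),
            new RepositoriesHealthTracker(repositoriesService)
        );
        return LocalHealthMonitor.create(settings, clusterService, threadPool, client, featureService, healthTrackers);
    }
}
```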
diff --git a/server/src/main/java/org/elasticsearch/health/node/RepositoriesHealthInfo.java b/server/src/main/java/org/elasticsearch/health/node/RepositoriesHealthInfo.java
new file mode 100644
index 0000000000000..ffbad943a783a
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/health/node/RepositoriesHealthInfo.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.health.node;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Health info regarding repository health for a node. It refers to issues that are local to a node such as the unknown and
+ * invalid repositories.
+ */
+public record RepositoriesHealthInfo(List<String> unknownRepositories, List<String> invalidRepositories) implements Writeable {
+ public RepositoriesHealthInfo(StreamInput in) throws IOException {
+ this(in.readStringCollectionAsList(), in.readStringCollectionAsList());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeStringCollection(unknownRepositories);
+ out.writeStringCollection(invalidRepositories);
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/health/node/UpdateHealthInfoCacheAction.java b/server/src/main/java/org/elasticsearch/health/node/UpdateHealthInfoCacheAction.java
index 9567331c678b5..bbbe84630f20a 100644
--- a/server/src/main/java/org/elasticsearch/health/node/UpdateHealthInfoCacheAction.java
+++ b/server/src/main/java/org/elasticsearch/health/node/UpdateHealthInfoCacheAction.java
@@ -27,6 +27,7 @@
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
+import java.util.Locale;
import java.util.Objects;
/**
@@ -42,16 +43,20 @@ public static class Request extends HealthNodeRequest {
private final DiskHealthInfo diskHealthInfo;
@Nullable
private final DataStreamLifecycleHealthInfo dslHealthInfo;
+ @Nullable
+ private final RepositoriesHealthInfo repositoriesHealthInfo;
- public Request(String nodeId, DiskHealthInfo diskHealthInfo) {
+ public Request(String nodeId, DiskHealthInfo diskHealthInfo, RepositoriesHealthInfo repositoriesHealthInfo) {
this.nodeId = nodeId;
this.diskHealthInfo = diskHealthInfo;
+ this.repositoriesHealthInfo = repositoriesHealthInfo;
this.dslHealthInfo = null;
}
public Request(String nodeId, DataStreamLifecycleHealthInfo dslHealthInfo) {
this.nodeId = nodeId;
this.diskHealthInfo = null;
+ this.repositoriesHealthInfo = null;
this.dslHealthInfo = dslHealthInfo;
}
@@ -61,6 +66,9 @@ public Request(StreamInput in) throws IOException {
if (in.getTransportVersion().onOrAfter(TransportVersions.HEALTH_INFO_ENRICHED_WITH_DSL_STATUS)) {
this.diskHealthInfo = in.readOptionalWriteable(DiskHealthInfo::new);
this.dslHealthInfo = in.readOptionalWriteable(DataStreamLifecycleHealthInfo::new);
+ this.repositoriesHealthInfo = in.getTransportVersion().onOrAfter(TransportVersions.HEALTH_INFO_ENRICHED_WITH_REPOS)
+ ? in.readOptionalWriteable(RepositoriesHealthInfo::new)
+ : null;
} else {
// BWC for pre-8.12 the disk health info was mandatory. Evolving this request has proven tricky however we've made use of
// waiting for all nodes to be on the {@link TransportVersions.HEALTH_INFO_ENRICHED_WITH_DSL_STATUS} transport version
@@ -68,6 +76,7 @@ public Request(StreamInput in) throws IOException {
// transport invariant of always having a disk health information in the request
this.diskHealthInfo = new DiskHealthInfo(in);
this.dslHealthInfo = null;
+ this.repositoriesHealthInfo = null;
}
}
@@ -83,6 +92,10 @@ public DataStreamLifecycleHealthInfo getDslHealthInfo() {
return dslHealthInfo;
}
+ public RepositoriesHealthInfo getRepositoriesHealthInfo() {
+ return repositoriesHealthInfo;
+ }
+
@Override
public ActionRequestValidationException validate() {
return null;
@@ -95,6 +108,9 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.HEALTH_INFO_ENRICHED_WITH_DSL_STATUS)) {
out.writeOptionalWriteable(diskHealthInfo);
out.writeOptionalWriteable(dslHealthInfo);
+ if (out.getTransportVersion().onOrAfter(TransportVersions.HEALTH_INFO_ENRICHED_WITH_REPOS)) {
+ out.writeOptionalWriteable(repositoriesHealthInfo);
+ }
} else {
// BWC for pre-8.12 the disk health info was mandatory. Evolving this request has proven tricky however we've made use of
// waiting for all nodes to be on the {@link TransportVersions.HEALTH_INFO_ENRICHED_WITH_DSL_STATUS} transport version
@@ -106,14 +122,14 @@ public void writeTo(StreamOutput out) throws IOException {
@Override
public String getDescription() {
- return "Update health info cache for node ["
- + nodeId
- + "] with disk health info ["
- + diskHealthInfo
- + "] and DSL health info"
- + " ["
- + dslHealthInfo
- + "].";
+ return String.format(
+ Locale.ROOT,
+ "Update health info cache for node [%s] with disk health info [%s], DSL health info [%s], repositories health info [%s].",
+ nodeId,
+ diskHealthInfo,
+ dslHealthInfo,
+ repositoriesHealthInfo
+ );
}
@Override
@@ -127,12 +143,44 @@ public boolean equals(Object o) {
Request request = (Request) o;
return Objects.equals(nodeId, request.nodeId)
&& Objects.equals(diskHealthInfo, request.diskHealthInfo)
- && Objects.equals(dslHealthInfo, request.dslHealthInfo);
+ && Objects.equals(dslHealthInfo, request.dslHealthInfo)
+ && Objects.equals(repositoriesHealthInfo, request.repositoriesHealthInfo);
}
@Override
public int hashCode() {
- return Objects.hash(nodeId, diskHealthInfo, dslHealthInfo);
+ return Objects.hash(nodeId, diskHealthInfo, dslHealthInfo, repositoriesHealthInfo);
+ }
+
+ public static class Builder {
+ private String nodeId;
+ private DiskHealthInfo diskHealthInfo;
+ private RepositoriesHealthInfo repositoriesHealthInfo;
+ private DataStreamLifecycleHealthInfo dslHealthInfo;
+
+ public Builder nodeId(String nodeId) {
+ this.nodeId = nodeId;
+ return this;
+ }
+
+ public Builder diskHealthInfo(DiskHealthInfo diskHealthInfo) {
+ this.diskHealthInfo = diskHealthInfo;
+ return this;
+ }
+
+ public Builder repositoriesHealthInfo(RepositoriesHealthInfo repositoriesHealthInfo) {
+ this.repositoriesHealthInfo = repositoriesHealthInfo;
+ return this;
+ }
+
+ public Builder dslHealthInfo(DataStreamLifecycleHealthInfo dslHealthInfo) {
+ this.dslHealthInfo = dslHealthInfo;
+ return this;
+ }
+
+ public Request build() {
+ return new Request(nodeId, diskHealthInfo, repositoriesHealthInfo);
+ }
}
}
@@ -174,7 +222,12 @@ protected void healthOperation(
ClusterState clusterState,
ActionListener<AcknowledgedResponse> listener
) {
- nodeHealthOverview.updateNodeHealth(request.getNodeId(), request.getDiskHealthInfo(), request.getDslHealthInfo());
+ nodeHealthOverview.updateNodeHealth(
+ request.getNodeId(),
+ request.getDiskHealthInfo(),
+ request.getDslHealthInfo(),
+ request.getRepositoriesHealthInfo()
+ );
listener.onResponse(AcknowledgedResponse.of(true));
}
}
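A usage sketch for the new `Request.Builder`; the node id and health values are illustrative, and note that `build()` delegates to the disk/repositories constructor shown above.

```java
import java.util.List;

import org.elasticsearch.health.HealthStatus;
import org.elasticsearch.health.node.DiskHealthInfo;
import org.elasticsearch.health.node.RepositoriesHealthInfo;
import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;

class RequestBuilderSketch {
    static UpdateHealthInfoCacheAction.Request greenRequest() {
        // Assembles a request of the same shape the monitoring loop builds from its trackers.
        return new UpdateHealthInfoCacheAction.Request.Builder()
            .nodeId("node-1")
            .diskHealthInfo(new DiskHealthInfo(HealthStatus.GREEN))
            .repositoriesHealthInfo(new RepositoriesHealthInfo(List.of(), List.of()))
            .build();
    }
}
```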
diff --git a/server/src/main/java/org/elasticsearch/health/node/tracker/DiskHealthTracker.java b/server/src/main/java/org/elasticsearch/health/node/tracker/DiskHealthTracker.java
new file mode 100644
index 0000000000000..a478130d83a78
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/health/node/tracker/DiskHealthTracker.java
@@ -0,0 +1,136 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.health.node.tracker;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodeRole;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.health.HealthStatus;
+import org.elasticsearch.health.metadata.HealthMetadata;
+import org.elasticsearch.health.node.DiskHealthInfo;
+import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
+import org.elasticsearch.node.NodeService;
+
+import java.util.Set;
+
+/**
+ * Determines the disk health of this node by checking if it exceeds the thresholds defined in the health metadata.
+ */
+public class DiskHealthTracker extends HealthTracker<DiskHealthInfo> {
+ private static final Logger logger = LogManager.getLogger(DiskHealthTracker.class);
+
+ private final NodeService nodeService;
+ private final ClusterService clusterService;
+
+ public DiskHealthTracker(NodeService nodeService, ClusterService clusterService) {
+ this.nodeService = nodeService;
+ this.clusterService = clusterService;
+ }
+
+ /**
+ * Determines the disk health of this node by checking if it exceeds the thresholds defined in the health metadata.
+ *
+ * @return the current disk health info.
+ */
+ @Override
+ public DiskHealthInfo checkCurrentHealth() {
+ var clusterState = clusterService.state();
+ var healthMetadata = HealthMetadata.getFromClusterState(clusterState);
+ DiscoveryNode node = clusterState.getNodes().getLocalNode();
+ HealthMetadata.Disk diskMetadata = healthMetadata.getDiskMetadata();
+ DiskUsage usage = getDiskUsage();
+ if (usage == null) {
+ return new DiskHealthInfo(HealthStatus.UNKNOWN, DiskHealthInfo.Cause.NODE_HAS_NO_DISK_STATS);
+ }
+
+ ByteSizeValue totalBytes = ByteSizeValue.ofBytes(usage.totalBytes());
+
+ if (node.isDedicatedFrozenNode() || isDedicatedSearchNode(node)) {
+ long frozenFloodStageThreshold = diskMetadata.getFreeBytesFrozenFloodStageWatermark(totalBytes).getBytes();
+ if (usage.freeBytes() < frozenFloodStageThreshold) {
+ logger.debug("Flood stage disk watermark [{}] exceeded on {}", frozenFloodStageThreshold, usage);
+ return new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.FROZEN_NODE_OVER_FLOOD_STAGE_THRESHOLD);
+ }
+ return new DiskHealthInfo(HealthStatus.GREEN);
+ }
+ long floodStageThreshold = diskMetadata.getFreeBytesFloodStageWatermark(totalBytes).getBytes();
+ if (usage.freeBytes() < floodStageThreshold) {
+ logger.debug("Flood stage disk watermark [{}] exceeded on {}", floodStageThreshold, usage);
+ return new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.NODE_OVER_THE_FLOOD_STAGE_THRESHOLD);
+ }
+
+ long highThreshold = diskMetadata.getFreeBytesHighWatermark(totalBytes).getBytes();
+ if (usage.freeBytes() < highThreshold) {
+ if (node.canContainData()) {
+ // for data nodes only report YELLOW if shards can't move away from the node
+ if (DiskHealthTracker.hasRelocatingShards(clusterState, node) == false) {
+ logger.debug("High disk watermark [{}] exceeded on {}", highThreshold, usage);
+ return new DiskHealthInfo(HealthStatus.YELLOW, DiskHealthInfo.Cause.NODE_OVER_HIGH_THRESHOLD);
+ }
+ } else {
+ // for non-data nodes report YELLOW when the disk high watermark is breached
+ logger.debug("High disk watermark [{}] exceeded on {}", highThreshold, usage);
+ return new DiskHealthInfo(HealthStatus.YELLOW, DiskHealthInfo.Cause.NODE_OVER_HIGH_THRESHOLD);
+ }
+ }
+ return new DiskHealthInfo(HealthStatus.GREEN);
+ }
+
+ @Override
+ public void addToRequestBuilder(UpdateHealthInfoCacheAction.Request.Builder builder, DiskHealthInfo healthInfo) {
+ builder.diskHealthInfo(healthInfo);
+ }
+
+ private static boolean isDedicatedSearchNode(DiscoveryNode node) {
+ Set<DiscoveryNodeRole> roles = node.getRoles();
+ return roles.contains(DiscoveryNodeRole.SEARCH_ROLE)
+ && roles.stream().filter(DiscoveryNodeRole::canContainData).anyMatch(r -> r != DiscoveryNodeRole.SEARCH_ROLE) == false;
+ }
+
+ private DiskUsage getDiskUsage() {
+ NodeStats nodeStats = nodeService.stats(
+ CommonStatsFlags.NONE,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false
+ );
+ return DiskUsage.findLeastAvailablePath(nodeStats);
+ }
+
+ static boolean hasRelocatingShards(ClusterState clusterState, DiscoveryNode node) {
+ RoutingNode routingNode = clusterState.getRoutingNodes().node(node.getId());
+ if (routingNode == null) {
+ // routing node will be null for non-data nodes
+ return false;
+ }
+ return routingNode.numberOfShardsWithState(ShardRoutingState.RELOCATING) > 0;
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/health/node/tracker/HealthTracker.java b/server/src/main/java/org/elasticsearch/health/node/tracker/HealthTracker.java
new file mode 100644
index 0000000000000..2dd71a38f959e
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/health/node/tracker/HealthTracker.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.health.node.tracker;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.health.node.LocalHealthMonitor;
+import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
+
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Base class for health trackers that will be executed by the {@link LocalHealthMonitor}. It keeps track of the last
+ * reported value and can retrieve the current health status when requested.
+ *
+ * @param <T> the type of the health check result they track
+ */
+public abstract class HealthTracker<T> {
+ private static final Logger logger = LogManager.getLogger(HealthTracker.class);
+
+ private final AtomicReference<T> lastReportedValue = new AtomicReference<>();
+
+ /**
+ * Determine the health info for this health check.
+ *
+ * @return the health info.
+ */
+ public abstract T checkCurrentHealth();
+
+ /**
+ * Add the health info to the request builder.
+ *
+ * @param builder the builder to add the health info to.
+ * @param healthInfo the health info to add.
+ */
+ public abstract void addToRequestBuilder(UpdateHealthInfoCacheAction.Request.Builder builder, T healthInfo);
+
+ /**
+ * Create a new {@link HealthProgress} instance by getting the current last reported value and determining the health info at this time.
+ *
+ * @return the new {@link HealthProgress} instance.
+ */
+ public HealthProgress<T> trackHealth() {
+ return new HealthProgress<>(this, lastReportedValue.get(), checkCurrentHealth());
+ }
+
+ /**
+ * Update the last reported health info to current, but only when the value inside lastReportedValue
+ * is equal to previous.
+ *
+ * @param previous the previous value that should be in lastReportedValue at the time of execution.
+ * @param current the value that should be stored in lastReportedValue.
+ */
+ public void updateLastReportedHealth(T previous, T current) {
+ if (lastReportedValue.compareAndSet(previous, current)) {
+ logger.debug("Health info [{}] successfully sent, last reported value: {}.", current, previous);
+ }
+ }
+
+ /**
+ * Reset the value of lastReportedValue to null.
+ * Should be used when, for example, the master or health node has changed.
+ */
+ public void reset() {
+ lastReportedValue.set(null);
+ }
+
+ public T getLastReportedValue() {
+ return lastReportedValue.get();
+ }
+
+ /**
+ * A record for storing the previous and current value of a health check. This allows us to be sure no concurrent processes have
+ * updated the health check's reference value.
+ *
+ * @param <T> the type that the health tracker returns
+ */
+ public record HealthProgress<T>(HealthTracker<T> healthTracker, T previousHealth, T currentHealth) {
+ public boolean hasChanged() {
+ return Objects.equals(previousHealth, currentHealth) == false;
+ }
+
+ /**
+ * See {@link HealthTracker#addToRequestBuilder}.
+ */
+ public void updateRequestBuilder(UpdateHealthInfoCacheAction.Request.Builder builder) {
+ healthTracker.addToRequestBuilder(builder, currentHealth);
+ }
+
+ /**
+ * Update the reference value of the health tracker with the current health info.
+ * See {@link HealthTracker#updateLastReportedHealth} for more info.
+ */
+ public void recordProgressIfRelevant() {
+ healthTracker.updateLastReportedHealth(previousHealth, currentHealth);
+ }
+ }
+}
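A sketch of one monitoring step against this API, using the repositories tracker as an example; the tracker instance and node id are assumed to be in scope, and the actual transport call and error handling are elided.

```java
import org.elasticsearch.health.node.RepositoriesHealthInfo;
import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
import org.elasticsearch.health.node.tracker.HealthTracker;
import org.elasticsearch.health.node.tracker.RepositoriesHealthTracker;

class TrackerCycleSketch {
    // One cycle: snapshot the tracker's progress, add the changed value to the request,
    // send it, and only record the new value once the health node has acknowledged it.
    static void oneCycle(RepositoriesHealthTracker tracker, String localNodeId) {
        HealthTracker.HealthProgress<RepositoriesHealthInfo> progress = tracker.trackHealth();
        if (progress.hasChanged() == false) {
            return; // nothing new to report
        }
        var builder = new UpdateHealthInfoCacheAction.Request.Builder().nodeId(localNodeId);
        progress.updateRequestBuilder(builder);
        UpdateHealthInfoCacheAction.Request request = builder.build();
        // ... send `request` to the health node; then, in the success callback only:
        progress.recordProgressIfRelevant();
    }
}
```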
diff --git a/server/src/main/java/org/elasticsearch/health/node/tracker/RepositoriesHealthTracker.java b/server/src/main/java/org/elasticsearch/health/node/tracker/RepositoriesHealthTracker.java
new file mode 100644
index 0000000000000..cffc470045e0b
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/health/node/tracker/RepositoriesHealthTracker.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.health.node.tracker;
+
+import org.elasticsearch.health.node.RepositoriesHealthInfo;
+import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
+import org.elasticsearch.repositories.InvalidRepository;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.repositories.UnknownTypeRepository;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Determines the health of repositories on this node.
+ */
+public class RepositoriesHealthTracker extends HealthTracker<RepositoriesHealthInfo> {
+ private final RepositoriesService repositoriesService;
+
+ public RepositoriesHealthTracker(RepositoriesService repositoriesService) {
+ this.repositoriesService = repositoriesService;
+ }
+
+ /**
+ * Determine the health of the repositories on this node. Do so by checking the current collection of registered repositories.
+ *
+ * @return the current repositories health on this node.
+ */
+ @Override
+ public RepositoriesHealthInfo checkCurrentHealth() {
+ var repositories = repositoriesService.getRepositories();
+ if (repositories.isEmpty()) {
+ return new RepositoriesHealthInfo(List.of(), List.of());
+ }
+
+ var unknown = new ArrayList<String>();
+ var invalid = new ArrayList<String>();
+ repositories.values().forEach(repository -> {
+ if (repository instanceof UnknownTypeRepository) {
+ unknown.add(repository.getMetadata().name());
+ } else if (repository instanceof InvalidRepository) {
+ invalid.add(repository.getMetadata().name());
+ }
+ });
+ return new RepositoriesHealthInfo(unknown, invalid);
+ }
+
+ @Override
+ public void addToRequestBuilder(UpdateHealthInfoCacheAction.Request.Builder builder, RepositoriesHealthInfo healthInfo) {
+ builder.repositoriesHealthInfo(healthInfo);
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java
index c5a5e5a5c4b96..f534d8b2dc806 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexService.java
@@ -330,7 +330,7 @@ public NodeMappingStats getNodeMappingStats() {
if (mapperService == null) {
return null;
}
- long totalCount = mapperService().mappingLookup().getTotalFieldsCount();
+ long totalCount = mapperService().mappingLookup().getTotalMapperCount();
long totalEstimatedOverhead = totalCount * 1024L; // 1KiB estimated per mapping
NodeMappingStats indexNodeMappingStats = new NodeMappingStats(totalCount, totalEstimatedOverhead);
return indexNodeMappingStats;
diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java
index 852547ecb1073..0e2b50257ae37 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java
@@ -21,13 +21,14 @@
import org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat;
import org.elasticsearch.index.codec.postings.ES812PostingsFormat;
import org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat;
+import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MappingLookup;
import org.elasticsearch.index.mapper.NumberFieldMapper;
-import org.elasticsearch.index.mapper.TimeSeriesParams;
+import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper;
import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper;
/**
@@ -110,33 +111,26 @@ public DocValuesFormat getDocValuesFormatForField(String field) {
}
boolean useTSDBDocValuesFormat(final String field) {
- return mapperService.getIndexSettings().isES87TSDBCodecEnabled()
- && isTimeSeriesModeIndex()
- && isNotSpecialField(field)
- && (isCounterOrGaugeMetricType(field) || isTimestampField(field));
- }
-
- private boolean isTimeSeriesModeIndex() {
- return IndexMode.TIME_SERIES.equals(mapperService.getIndexSettings().getMode());
- }
-
- private boolean isCounterOrGaugeMetricType(String field) {
- if (mapperService != null) {
+ if (mapperService != null && mapperService.getIndexSettings().isES87TSDBCodecEnabled() && isTimeSeriesModeIndex()) {
final MappingLookup mappingLookup = mapperService.mappingLookup();
if (mappingLookup.getMapper(field) instanceof NumberFieldMapper) {
- final MappedFieldType fieldType = mappingLookup.getFieldType(field);
- return TimeSeriesParams.MetricType.COUNTER.equals(fieldType.getMetricType())
- || TimeSeriesParams.MetricType.GAUGE.equals(fieldType.getMetricType());
+ return true;
+ }
+ if (mappingLookup.getMapper(field) instanceof DateFieldMapper) {
+ return true;
+ }
+ if (mappingLookup.getMapper(field) instanceof KeywordFieldMapper) {
+ return true;
+ }
+ if (mappingLookup.getMapper(field) instanceof TimeSeriesIdFieldMapper) {
+ return true;
}
}
return false;
}
- private static boolean isTimestampField(String field) {
- return "@timestamp".equals(field);
+ private boolean isTimeSeriesModeIndex() {
+ return IndexMode.TIME_SERIES == mapperService.getIndexSettings().getMode();
}
- private static boolean isNotSpecialField(String field) {
- return field.startsWith("_") == false;
- }
}
diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesConsumer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesConsumer.java
index 17bdcbbfb0739..15dc386f41284 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesConsumer.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesConsumer.java
@@ -16,25 +16,43 @@
import org.apache.lucene.index.EmptyDocValuesProducer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.SortedSetSelector;
+import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.store.ByteBuffersDataOutput;
import org.apache.lucene.store.ByteBuffersIndexOutput;
import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.LongsRef;
+import org.apache.lucene.util.StringHelper;
+import org.apache.lucene.util.compress.LZ4;
import org.apache.lucene.util.packed.DirectMonotonicWriter;
+import org.apache.lucene.util.packed.PackedInts;
import org.elasticsearch.core.IOUtils;
import java.io.IOException;
import java.util.Arrays;
+import static org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat.DIRECT_MONOTONIC_BLOCK_SHIFT;
+import static org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat.SORTED_SET;
+
final class ES87TSDBDocValuesConsumer extends DocValuesConsumer {
IndexOutput data, meta;
final int maxDoc;
+ private byte[] termsDictBuffer;
ES87TSDBDocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension)
throws IOException {
+ this.termsDictBuffer = new byte[1 << 14];
boolean success = false;
try {
final String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
@@ -68,15 +86,15 @@ final class ES87TSDBDocValuesConsumer extends DocValuesConsumer {
public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
meta.writeInt(field.number);
meta.writeByte(ES87TSDBDocValuesFormat.NUMERIC);
- writeNumericField(field, new EmptyDocValuesProducer() {
+ writeField(field, new EmptyDocValuesProducer() {
@Override
public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException {
return DocValues.singleton(valuesProducer.getNumeric(field));
}
- });
+ }, -1);
}
- private long[] writeNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
+ private long[] writeField(FieldInfo field, DocValuesProducer valuesProducer, long maxOrd) throws IOException {
int numDocsWithValue = 0;
long numValues = 0;
@@ -109,7 +127,8 @@ private long[] writeNumericField(FieldInfo field, DocValuesProducer valuesProduc
meta.writeLong(numValues);
if (numValues > 0) {
- meta.writeInt(ES87TSDBDocValuesFormat.DIRECT_MONOTONIC_BLOCK_SHIFT);
+ // Special case for maxOrd of 1, signal -1 that no blocks will be written
+ meta.writeInt(maxOrd != 1 ? ES87TSDBDocValuesFormat.DIRECT_MONOTONIC_BLOCK_SHIFT : -1);
final ByteBuffersDataOutput indexOut = new ByteBuffersDataOutput();
final DirectMonotonicWriter indexWriter = DirectMonotonicWriter.getInstance(
meta,
@@ -118,32 +137,46 @@ private long[] writeNumericField(FieldInfo field, DocValuesProducer valuesProduc
ES87TSDBDocValuesFormat.DIRECT_MONOTONIC_BLOCK_SHIFT
);
- final long[] buffer = new long[ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE];
- int bufferSize = 0;
final long valuesDataOffset = data.getFilePointer();
- final ES87TSDBDocValuesEncoder encoder = new ES87TSDBDocValuesEncoder();
-
- values = valuesProducer.getSortedNumeric(field);
- for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) {
- final int count = values.docValueCount();
- for (int i = 0; i < count; ++i) {
- buffer[bufferSize++] = values.nextValue();
- if (bufferSize == ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE) {
- indexWriter.add(data.getFilePointer() - valuesDataOffset);
+ // Special case for maxOrd of 1, skip writing the blocks
+ if (maxOrd != 1) {
+ final long[] buffer = new long[ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE];
+ int bufferSize = 0;
+ final ES87TSDBDocValuesEncoder encoder = new ES87TSDBDocValuesEncoder();
+ values = valuesProducer.getSortedNumeric(field);
+ final int bitsPerOrd = maxOrd >= 0 ? PackedInts.bitsRequired(maxOrd - 1) : -1;
+ for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) {
+ final int count = values.docValueCount();
+ for (int i = 0; i < count; ++i) {
+ buffer[bufferSize++] = values.nextValue();
+ if (bufferSize == ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE) {
+ indexWriter.add(data.getFilePointer() - valuesDataOffset);
+ if (maxOrd >= 0) {
+ encoder.encodeOrdinals(buffer, data, bitsPerOrd);
+ } else {
+ encoder.encode(buffer, data);
+ }
+ bufferSize = 0;
+ }
+ }
+ }
+ if (bufferSize > 0) {
+ indexWriter.add(data.getFilePointer() - valuesDataOffset);
+ // Fill unused slots in the block with zeroes rather than junk
+ Arrays.fill(buffer, bufferSize, ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE, 0L);
+ if (maxOrd >= 0) {
+ encoder.encodeOrdinals(buffer, data, bitsPerOrd);
+ } else {
encoder.encode(buffer, data);
- bufferSize = 0;
}
}
}
- if (bufferSize > 0) {
- indexWriter.add(data.getFilePointer() - valuesDataOffset);
- // Fill unused slots in the block with zeroes rather than junk
- Arrays.fill(buffer, bufferSize, ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE, 0L);
- encoder.encode(buffer, data);
- }
final long valuesDataLength = data.getFilePointer() - valuesDataOffset;
- indexWriter.finish();
+ if (maxOrd != 1) {
+ // Special case for maxOrd of 1, indexWriter isn't really used, so no need to invoke finish() method.
+ indexWriter.finish();
+ }
final long indexDataOffset = data.getFilePointer();
data.copyBytes(indexOut.toDataInput(), indexOut.size());
meta.writeLong(indexDataOffset);
@@ -163,18 +196,205 @@ public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) th
@Override
public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
- throw new UnsupportedOperationException("Unsupported sorted doc values for field [" + field.name + "]");
+ meta.writeInt(field.number);
+ meta.writeByte(ES87TSDBDocValuesFormat.SORTED);
+ doAddSortedField(field, valuesProducer);
+ }
+
+ private void doAddSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
+ SortedDocValues sorted = valuesProducer.getSorted(field);
+ int maxOrd = sorted.getValueCount();
+ writeField(field, new EmptyDocValuesProducer() {
+ @Override
+ public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException {
+ SortedDocValues sorted = valuesProducer.getSorted(field);
+ NumericDocValues sortedOrds = new NumericDocValues() {
+ @Override
+ public long longValue() throws IOException {
+ return sorted.ordValue();
+ }
+
+ @Override
+ public boolean advanceExact(int target) throws IOException {
+ return sorted.advanceExact(target);
+ }
+
+ @Override
+ public int docID() {
+ return sorted.docID();
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return sorted.nextDoc();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ return sorted.advance(target);
+ }
+
+ @Override
+ public long cost() {
+ return sorted.cost();
+ }
+ };
+ return DocValues.singleton(sortedOrds);
+ }
+ }, maxOrd);
+ addTermsDict(DocValues.singleton(valuesProducer.getSorted(field)));
+ }
+
+ private void addTermsDict(SortedSetDocValues values) throws IOException {
+ final long size = values.getValueCount();
+ meta.writeVLong(size);
+
+ int blockMask = ES87TSDBDocValuesFormat.TERMS_DICT_BLOCK_LZ4_MASK;
+ int shift = ES87TSDBDocValuesFormat.TERMS_DICT_BLOCK_LZ4_SHIFT;
+
+ meta.writeInt(DIRECT_MONOTONIC_BLOCK_SHIFT);
+ ByteBuffersDataOutput addressBuffer = new ByteBuffersDataOutput();
+ ByteBuffersIndexOutput addressOutput = new ByteBuffersIndexOutput(addressBuffer, "temp", "temp");
+ long numBlocks = (size + blockMask) >>> shift;
+ DirectMonotonicWriter writer = DirectMonotonicWriter.getInstance(meta, addressOutput, numBlocks, DIRECT_MONOTONIC_BLOCK_SHIFT);
+
+ BytesRefBuilder previous = new BytesRefBuilder();
+ long ord = 0;
+ long start = data.getFilePointer();
+ int maxLength = 0, maxBlockLength = 0;
+ TermsEnum iterator = values.termsEnum();
+
+ LZ4.FastCompressionHashTable ht = new LZ4.FastCompressionHashTable();
+ ByteArrayDataOutput bufferedOutput = new ByteArrayDataOutput(termsDictBuffer);
+ int dictLength = 0;
+
+ for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
+ if ((ord & blockMask) == 0) {
+ if (ord != 0) {
+ // flush the previous block
+ final int uncompressedLength = compressAndGetTermsDictBlockLength(bufferedOutput, dictLength, ht);
+ maxBlockLength = Math.max(maxBlockLength, uncompressedLength);
+ bufferedOutput.reset(termsDictBuffer);
+ }
+
+ writer.add(data.getFilePointer() - start);
+ // Write the first term both to the index output, and to the buffer where we'll use it as a
+ // dictionary for compression
+ data.writeVInt(term.length);
+ data.writeBytes(term.bytes, term.offset, term.length);
+ bufferedOutput = maybeGrowBuffer(bufferedOutput, term.length);
+ bufferedOutput.writeBytes(term.bytes, term.offset, term.length);
+ dictLength = term.length;
+ } else {
+ final int prefixLength = StringHelper.bytesDifference(previous.get(), term);
+ final int suffixLength = term.length - prefixLength;
+ assert suffixLength > 0; // terms are unique
+ // Will write (suffixLength + 1 byte + 2 vint) bytes. Grow the buffer in need.
+ bufferedOutput = maybeGrowBuffer(bufferedOutput, suffixLength + 11);
+ bufferedOutput.writeByte((byte) (Math.min(prefixLength, 15) | (Math.min(15, suffixLength - 1) << 4)));
+ if (prefixLength >= 15) {
+ bufferedOutput.writeVInt(prefixLength - 15);
+ }
+ if (suffixLength >= 16) {
+ bufferedOutput.writeVInt(suffixLength - 16);
+ }
+ bufferedOutput.writeBytes(term.bytes, term.offset + prefixLength, suffixLength);
+ }
+ maxLength = Math.max(maxLength, term.length);
+ previous.copyBytes(term);
+ ++ord;
+ }
+ // Compress and write out the last block
+ if (bufferedOutput.getPosition() > dictLength) {
+ final int uncompressedLength = compressAndGetTermsDictBlockLength(bufferedOutput, dictLength, ht);
+ maxBlockLength = Math.max(maxBlockLength, uncompressedLength);
+ }
+
+ writer.finish();
+ meta.writeInt(maxLength);
+ // Write one more int for storing max block length.
+ meta.writeInt(maxBlockLength);
+ meta.writeLong(start);
+ meta.writeLong(data.getFilePointer() - start);
+ start = data.getFilePointer();
+ addressBuffer.copyTo(data);
+ meta.writeLong(start);
+ meta.writeLong(data.getFilePointer() - start);
+
+ // Now write the reverse terms index
+ writeTermsIndex(values);
+ }
+
+ private int compressAndGetTermsDictBlockLength(ByteArrayDataOutput bufferedOutput, int dictLength, LZ4.FastCompressionHashTable ht)
+ throws IOException {
+ int uncompressedLength = bufferedOutput.getPosition() - dictLength;
+ data.writeVInt(uncompressedLength);
+ LZ4.compressWithDictionary(termsDictBuffer, 0, dictLength, uncompressedLength, data, ht);
+ return uncompressedLength;
+ }
+
+ private ByteArrayDataOutput maybeGrowBuffer(ByteArrayDataOutput bufferedOutput, int termLength) {
+ int pos = bufferedOutput.getPosition(), originalLength = termsDictBuffer.length;
+ if (pos + termLength >= originalLength - 1) {
+ termsDictBuffer = ArrayUtil.grow(termsDictBuffer, originalLength + termLength);
+ bufferedOutput = new ByteArrayDataOutput(termsDictBuffer, pos, termsDictBuffer.length - pos);
+ }
+ return bufferedOutput;
+ }
+
+ private void writeTermsIndex(SortedSetDocValues values) throws IOException {
+ final long size = values.getValueCount();
+ meta.writeInt(ES87TSDBDocValuesFormat.TERMS_DICT_REVERSE_INDEX_SHIFT);
+ long start = data.getFilePointer();
+
+ long numBlocks = 1L + ((size + ES87TSDBDocValuesFormat.TERMS_DICT_REVERSE_INDEX_MASK)
+ >>> ES87TSDBDocValuesFormat.TERMS_DICT_REVERSE_INDEX_SHIFT);
+ ByteBuffersDataOutput addressBuffer = new ByteBuffersDataOutput();
+ DirectMonotonicWriter writer;
+ try (ByteBuffersIndexOutput addressOutput = new ByteBuffersIndexOutput(addressBuffer, "temp", "temp")) {
+ writer = DirectMonotonicWriter.getInstance(meta, addressOutput, numBlocks, DIRECT_MONOTONIC_BLOCK_SHIFT);
+ TermsEnum iterator = values.termsEnum();
+ BytesRefBuilder previous = new BytesRefBuilder();
+ long offset = 0;
+ long ord = 0;
+ for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
+ if ((ord & ES87TSDBDocValuesFormat.TERMS_DICT_REVERSE_INDEX_MASK) == 0) {
+ writer.add(offset);
+ final int sortKeyLength;
+ if (ord == 0) {
+ // no previous term: no bytes to write
+ sortKeyLength = 0;
+ } else {
+ sortKeyLength = StringHelper.sortKeyLength(previous.get(), term);
+ }
+ offset += sortKeyLength;
+ data.writeBytes(term.bytes, term.offset, sortKeyLength);
+ } else if ((ord
+ & ES87TSDBDocValuesFormat.TERMS_DICT_REVERSE_INDEX_MASK) == ES87TSDBDocValuesFormat.TERMS_DICT_REVERSE_INDEX_MASK) {
+ previous.copyBytes(term);
+ }
+ ++ord;
+ }
+ writer.add(offset);
+ writer.finish();
+ meta.writeLong(start);
+ meta.writeLong(data.getFilePointer() - start);
+ start = data.getFilePointer();
+ addressBuffer.copyTo(data);
+ meta.writeLong(start);
+ meta.writeLong(data.getFilePointer() - start);
+ }
}
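writeTermsIndex samples one term out of every 1024 (TERMS_DICT_REVERSE_INDEX_SIZE) and keeps only the shortest prefix that still sorts strictly after the previously sampled term; this is what seekCeil later binary-searches before touching any LZ4 block. A small illustration of the prefix computation, with made-up terms:

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.StringHelper;

    BytesRef previous = new BytesRef("apple");
    BytesRef sampled = new BytesRef("apricot");
    // shortest prefix of "apricot" that compares greater than "apple" is "apr"
    int sortKeyLength = StringHelper.sortKeyLength(previous, sampled); // 3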
@Override
public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
meta.writeInt(field.number);
meta.writeByte(ES87TSDBDocValuesFormat.SORTED_NUMERIC);
- writeSortedNumericField(field, valuesProducer);
+ writeSortedNumericField(field, valuesProducer, -1);
}
- private void writeSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
- long[] stats = writeNumericField(field, valuesProducer);
+ private void writeSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer, long maxOrd) throws IOException {
+ long[] stats = writeField(field, valuesProducer, maxOrd);
int numDocsWithField = Math.toIntExact(stats[0]);
long numValues = stats[1];
assert numValues >= numDocsWithField;
@@ -203,9 +423,98 @@ private void writeSortedNumericField(FieldInfo field, DocValuesProducer valuesPr
}
}
+ private static boolean isSingleValued(SortedSetDocValues values) throws IOException {
+ if (DocValues.unwrapSingleton(values) != null) {
+ return true;
+ }
+
+ assert values.docID() == -1;
+ for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) {
+ int docValueCount = values.docValueCount();
+ assert docValueCount > 0;
+ if (docValueCount > 1) {
+ return false;
+ }
+ }
+ return true;
+ }
+
@Override
public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
- throw new UnsupportedOperationException("Unsupported sorted set doc values for field [" + field.name + "]");
+ meta.writeInt(field.number);
+ meta.writeByte(SORTED_SET);
+
+ if (isSingleValued(valuesProducer.getSortedSet(field))) {
+ meta.writeByte((byte) 0); // multiValued (0 = singleValued)
+ doAddSortedField(field, new EmptyDocValuesProducer() {
+ @Override
+ public SortedDocValues getSorted(FieldInfo field) throws IOException {
+ return SortedSetSelector.wrap(valuesProducer.getSortedSet(field), SortedSetSelector.Type.MIN);
+ }
+ });
+ return;
+ }
+ meta.writeByte((byte) 1); // multiValued (1 = multiValued)
+
+ SortedSetDocValues values = valuesProducer.getSortedSet(field);
+ long maxOrd = values.getValueCount();
+ writeSortedNumericField(field, new EmptyDocValuesProducer() {
+ @Override
+ public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException {
+ SortedSetDocValues values = valuesProducer.getSortedSet(field);
+ return new SortedNumericDocValues() {
+
+ long[] ords = LongsRef.EMPTY_LONGS;
+ int i, docValueCount;
+
+ @Override
+ public long nextValue() throws IOException {
+ return ords[i++];
+ }
+
+ @Override
+ public int docValueCount() {
+ return docValueCount;
+ }
+
+ @Override
+ public boolean advanceExact(int target) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int docID() {
+ return values.docID();
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ int doc = values.nextDoc();
+ if (doc != NO_MORE_DOCS) {
+ docValueCount = values.docValueCount();
+ ords = ArrayUtil.grow(ords, docValueCount);
+ for (int j = 0; j < docValueCount; j++) {
+ ords[j] = values.nextOrd();
+ }
+ i = 0;
+ }
+ return doc;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long cost() {
+ return values.cost();
+ }
+ };
+ }
+ }, maxOrd);
+
+ addTermsDict(valuesProducer.getSortedSet(field));
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java
index e3877e65581f2..f293eb86141b6 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java
@@ -178,6 +178,77 @@ void encode(long[] in, DataOutput out) throws IOException {
deltaEncode(0, 0, in, out);
}
+ /**
+ * Optimizes for encoding sorted fields where we expect a block to mostly either be the same value
+ * or to make a transition from one value to a second one.
+ * <p>
+ * Encodes blocks in the following format:
+ * <ul>
+ * <li>byte 0: 1/2 bits header + 6/7 bits data</li>
+ * <li>byte 1..n: data</li>
+ * </ul>
+ * The header (first 1 or 2 bits) describes how the data is encoded:
+ * <ul>
+ * <li>?0 block has a single value (vlong), 2nd bit already contains data</li>
+ * <li>01 block has two runs, data contains value 1 (vlong), run-length (vint) of value 1,
+ * and delta from first to second value (zlong)</li>
+ * <li>11 block is bit-packed</li>
+ * </ul>
+ */
+ void encodeOrdinals(long[] in, DataOutput out, int bitsPerOrd) throws IOException {
+ assert in.length == ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE;
+ int numRuns = 1;
+ for (int i = 1; i < in.length; ++i) {
+ if (in[i - 1] != in[i]) {
+ numRuns++;
+ }
+ }
+ if (numRuns == 1 && bitsPerOrd < 63) {
+ long value = in[0];
+ // set first bit to 0 to indicate the block has a single run
+ out.writeVLong(value << 1);
+ } else if (numRuns == 2 && bitsPerOrd < 62) {
+ // set first two bits to 01 to indicate the block has two runs
+ out.writeVLong((in[0] << 2) | 0b01);
+ int firstRunLen = in.length;
+ for (int i = 1; i < in.length; ++i) {
+ if (in[i] != in[0]) {
+ firstRunLen = i;
+ break;
+ }
+ }
+ out.writeVInt(firstRunLen);
+ out.writeZLong(in[in.length - 1] - in[0]);
+ } else {
+ // set first two bits to 11 to indicate the block is bit-packed
+ out.writeVLong(0b11);
+ forUtil.encode(in, bitsPerOrd, out);
+ }
+ }
+
+ void decodeOrdinals(DataInput in, long[] out, int bitsPerOrd) throws IOException {
+ assert out.length == ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE : out.length;
+
+ long v1 = in.readVLong();
+ int header = (int) (v1 & 0b11L);
+ if (header == 0b00 || header == 0b10) {
+ // first bit is zero -> single run
+ Arrays.fill(out, v1 >>> 1);
+ } else if (header == 0b01) {
+ // first two bits are 01 -> two runs
+ v1 = v1 >>> 2;
+ int runLen = in.readVInt();
+ long v2 = v1 + in.readZLong();
+ Arrays.fill(out, 0, runLen, v1);
+ Arrays.fill(out, runLen, out.length, v2);
+ } else {
+ // first two bits are 11 -> bit-packed
+ forUtil.decode(bitsPerOrd, in, out);
+ }
+ }
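decodeOrdinals mirrors the three encodings exactly, branching on the two lowest bits of the first vlong. A minimal sketch (not part of this change) of which branch a full block of ordinals takes, assuming the codec's 128-value NUMERIC_BLOCK_SIZE and made-up values:

    import java.util.Arrays;

    long[] singleRun = new long[128];
    Arrays.fill(singleRun, 7L);              // 1 run  -> writeVLong(7 << 1), low bit 0

    long[] twoRuns = new long[128];
    Arrays.fill(twoRuns, 0, 100, 3L);        // 2 runs -> writeVLong((3 << 2) | 0b01),
    Arrays.fill(twoRuns, 100, 128, 9L);      //           writeVInt(100), writeZLong(9 - 3)

    long[] manyRuns = new long[128];
    for (int i = 0; i < 128; i++) {
        manyRuns[i] = i % 3;                 // >2 runs -> writeVLong(0b11), then bit-packed by ForUtil
    }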
+
/** Decode longs that have been encoded with {@link #encode}. */
void decode(DataInput in, long[] out) throws IOException {
assert out.length == ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE : out.length;
diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesFormat.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesFormat.java
index d8b2ea8b677b8..c5f597f27eb98 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesFormat.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesFormat.java
@@ -34,6 +34,14 @@ public class ES87TSDBDocValuesFormat extends org.apache.lucene.codecs.DocValuesF
static final byte SORTED_SET = 3;
static final byte SORTED_NUMERIC = 4;
+ static final int TERMS_DICT_BLOCK_LZ4_SHIFT = 6;
+ static final int TERMS_DICT_BLOCK_LZ4_SIZE = 1 << TERMS_DICT_BLOCK_LZ4_SHIFT;
+ static final int TERMS_DICT_BLOCK_LZ4_MASK = TERMS_DICT_BLOCK_LZ4_SIZE - 1;
+
+ static final int TERMS_DICT_REVERSE_INDEX_SHIFT = 10;
+ static final int TERMS_DICT_REVERSE_INDEX_SIZE = 1 << TERMS_DICT_REVERSE_INDEX_SHIFT;
+ static final int TERMS_DICT_REVERSE_INDEX_MASK = TERMS_DICT_REVERSE_INDEX_SIZE - 1;
+
public ES87TSDBDocValuesFormat() {
super(CODEC_NAME);
}
diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java
index 4c691d84e2b4d..a06227351473a 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java
@@ -11,30 +11,43 @@
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.DocValuesProducer;
import org.apache.lucene.codecs.lucene90.IndexedDISI;
+import org.apache.lucene.index.BaseTermsEnum;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.ImpactsEnum;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.ChecksumIndexInput;
+import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.RandomAccessInput;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LongValues;
+import org.apache.lucene.util.compress.LZ4;
import org.apache.lucene.util.packed.DirectMonotonicReader;
+import org.apache.lucene.util.packed.PackedInts;
import org.elasticsearch.core.IOUtils;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
+import static org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat.TERMS_DICT_BLOCK_LZ4_SHIFT;
+
public class ES87TSDBDocValuesProducer extends DocValuesProducer {
private final Map<String, NumericEntry> numerics = new HashMap<>();
+ private final Map<String, SortedEntry> sorted = new HashMap<>();
+ private final Map<String, SortedSetEntry> sortedSets = new HashMap<>();
private final Map<String, SortedNumericEntry> sortedNumerics = new HashMap<>();
private final IndexInput data;
private final int maxDoc;
@@ -101,7 +114,7 @@ public class ES87TSDBDocValuesProducer extends DocValuesProducer {
@Override
public NumericDocValues getNumeric(FieldInfo field) throws IOException {
NumericEntry entry = numerics.get(field.name);
- return getNumeric(entry);
+ return getNumeric(entry, -1);
}
@Override
@@ -111,18 +124,433 @@ public BinaryDocValues getBinary(FieldInfo field) throws IOException {
@Override
public SortedDocValues getSorted(FieldInfo field) throws IOException {
- throw new UnsupportedOperationException("Unsupported sorted doc values for field [" + field.name + "]");
+ SortedEntry entry = sorted.get(field.name);
+ return getSorted(entry);
+ }
+
+ private SortedDocValues getSorted(SortedEntry entry) throws IOException {
+ final NumericDocValues ords = getNumeric(entry.ordsEntry, entry.termsDictEntry.termsDictSize);
+ return new BaseSortedDocValues(entry) {
+
+ @Override
+ public int ordValue() throws IOException {
+ return (int) ords.longValue();
+ }
+
+ @Override
+ public boolean advanceExact(int target) throws IOException {
+ return ords.advanceExact(target);
+ }
+
+ @Override
+ public int docID() {
+ return ords.docID();
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return ords.nextDoc();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ return ords.advance(target);
+ }
+
+ @Override
+ public long cost() {
+ return ords.cost();
+ }
+ };
+ }
+
+ private abstract class BaseSortedDocValues extends SortedDocValues {
+
+ final SortedEntry entry;
+ final TermsEnum termsEnum;
+
+ BaseSortedDocValues(SortedEntry entry) throws IOException {
+ this.entry = entry;
+ this.termsEnum = termsEnum();
+ }
+
+ @Override
+ public int getValueCount() {
+ return Math.toIntExact(entry.termsDictEntry.termsDictSize);
+ }
+
+ @Override
+ public BytesRef lookupOrd(int ord) throws IOException {
+ termsEnum.seekExact(ord);
+ return termsEnum.term();
+ }
+
+ @Override
+ public int lookupTerm(BytesRef key) throws IOException {
+ TermsEnum.SeekStatus status = termsEnum.seekCeil(key);
+ switch (status) {
+ case FOUND:
+ return Math.toIntExact(termsEnum.ord());
+ case NOT_FOUND:
+ case END:
+ default:
+ return Math.toIntExact(-1L - termsEnum.ord());
+ }
+ }
+
+ @Override
+ public TermsEnum termsEnum() throws IOException {
+ return new TermsDict(entry.termsDictEntry, data);
+ }
+ }
+
+ private abstract class BaseSortedSetDocValues extends SortedSetDocValues {
+
+ final SortedSetEntry entry;
+ final IndexInput data;
+ final TermsEnum termsEnum;
+
+ BaseSortedSetDocValues(SortedSetEntry entry, IndexInput data) throws IOException {
+ this.entry = entry;
+ this.data = data;
+ this.termsEnum = termsEnum();
+ }
+
+ @Override
+ public long getValueCount() {
+ return entry.termsDictEntry.termsDictSize;
+ }
+
+ @Override
+ public BytesRef lookupOrd(long ord) throws IOException {
+ termsEnum.seekExact(ord);
+ return termsEnum.term();
+ }
+
+ @Override
+ public long lookupTerm(BytesRef key) throws IOException {
+ TermsEnum.SeekStatus status = termsEnum.seekCeil(key);
+ switch (status) {
+ case FOUND:
+ return termsEnum.ord();
+ case NOT_FOUND:
+ case END:
+ default:
+ return -1L - termsEnum.ord();
+ }
+ }
+
+ @Override
+ public TermsEnum termsEnum() throws IOException {
+ return new TermsDict(entry.termsDictEntry, data);
+ }
+ }
+
+ private class TermsDict extends BaseTermsEnum {
+ static final int LZ4_DECOMPRESSOR_PADDING = 7;
+
+ final TermsDictEntry entry;
+ final LongValues blockAddresses;
+ final IndexInput bytes;
+ final long blockMask;
+ final LongValues indexAddresses;
+ final IndexInput indexBytes;
+ final BytesRef term;
+ long ord = -1;
+
+ BytesRef blockBuffer = null;
+ ByteArrayDataInput blockInput = null;
+ long currentCompressedBlockStart = -1;
+ long currentCompressedBlockEnd = -1;
+
+ TermsDict(TermsDictEntry entry, IndexInput data) throws IOException {
+ this.entry = entry;
+ RandomAccessInput addressesSlice = data.randomAccessSlice(entry.termsAddressesOffset, entry.termsAddressesLength);
+ blockAddresses = DirectMonotonicReader.getInstance(entry.termsAddressesMeta, addressesSlice);
+ bytes = data.slice("terms", entry.termsDataOffset, entry.termsDataLength);
+ blockMask = (1L << TERMS_DICT_BLOCK_LZ4_SHIFT) - 1;
+ RandomAccessInput indexAddressesSlice = data.randomAccessSlice(
+ entry.termsIndexAddressesOffset,
+ entry.termsIndexAddressesLength
+ );
+ indexAddresses = DirectMonotonicReader.getInstance(entry.termsIndexAddressesMeta, indexAddressesSlice);
+ indexBytes = data.slice("terms-index", entry.termsIndexOffset, entry.termsIndexLength);
+ term = new BytesRef(entry.maxTermLength);
+
+ // Add the max term length for the dictionary.
+ // Adding 7 padding bytes helps the LZ4 decompression run faster.
+ int bufferSize = entry.maxBlockLength + entry.maxTermLength + LZ4_DECOMPRESSOR_PADDING;
+ blockBuffer = new BytesRef(new byte[bufferSize], 0, bufferSize);
+ }
+
+ @Override
+ public BytesRef next() throws IOException {
+ if (++ord >= entry.termsDictSize) {
+ return null;
+ }
+
+ if ((ord & blockMask) == 0L) {
+ decompressBlock();
+ } else {
+ DataInput input = blockInput;
+ final int token = Byte.toUnsignedInt(input.readByte());
+ int prefixLength = token & 0x0F;
+ int suffixLength = 1 + (token >>> 4);
+ if (prefixLength == 15) {
+ prefixLength += input.readVInt();
+ }
+ if (suffixLength == 16) {
+ suffixLength += input.readVInt();
+ }
+ term.length = prefixLength + suffixLength;
+ input.readBytes(term.bytes, prefixLength, suffixLength);
+ }
+ return term;
+ }
+
+ @Override
+ public void seekExact(long ord) throws IOException {
+ if (ord < 0 || ord >= entry.termsDictSize) {
+ throw new IndexOutOfBoundsException();
+ }
+ // Signed shift since ord is -1 when the terms enum is not positioned
+ final long currentBlockIndex = this.ord >> TERMS_DICT_BLOCK_LZ4_SHIFT;
+ final long blockIndex = ord >> TERMS_DICT_BLOCK_LZ4_SHIFT;
+ if (ord < this.ord || blockIndex != currentBlockIndex) {
+ // The looked up ord is before the current ord or belongs to a different block, seek again
+ final long blockAddress = blockAddresses.get(blockIndex);
+ bytes.seek(blockAddress);
+ this.ord = (blockIndex << TERMS_DICT_BLOCK_LZ4_SHIFT) - 1;
+ }
+ // Scan to the looked up ord
+ while (this.ord < ord) {
+ next();
+ }
+ }
+
+ private BytesRef getTermFromIndex(long index) throws IOException {
+ assert index >= 0 && index <= (entry.termsDictSize - 1) >>> entry.termsDictIndexShift;
+ final long start = indexAddresses.get(index);
+ term.length = (int) (indexAddresses.get(index + 1) - start);
+ indexBytes.seek(start);
+ indexBytes.readBytes(term.bytes, 0, term.length);
+ return term;
+ }
+
+ private long seekTermsIndex(BytesRef text) throws IOException {
+ long lo = 0L;
+ long hi = (entry.termsDictSize - 1) >> entry.termsDictIndexShift;
+ while (lo <= hi) {
+ final long mid = (lo + hi) >>> 1;
+ getTermFromIndex(mid);
+ final int cmp = term.compareTo(text);
+ if (cmp <= 0) {
+ lo = mid + 1;
+ } else {
+ hi = mid - 1;
+ }
+ }
+
+ assert hi < 0 || getTermFromIndex(hi).compareTo(text) <= 0;
+ assert hi == ((entry.termsDictSize - 1) >> entry.termsDictIndexShift) || getTermFromIndex(hi + 1).compareTo(text) > 0;
+
+ return hi;
+ }
+
+ private BytesRef getFirstTermFromBlock(long block) throws IOException {
+ assert block >= 0 && block <= (entry.termsDictSize - 1) >>> TERMS_DICT_BLOCK_LZ4_SHIFT;
+ final long blockAddress = blockAddresses.get(block);
+ bytes.seek(blockAddress);
+ term.length = bytes.readVInt();
+ bytes.readBytes(term.bytes, 0, term.length);
+ return term;
+ }
+
+ private long seekBlock(BytesRef text) throws IOException {
+ long index = seekTermsIndex(text);
+ if (index == -1L) {
+ return -1L;
+ }
+
+ long ordLo = index << entry.termsDictIndexShift;
+ long ordHi = Math.min(entry.termsDictSize, ordLo + (1L << entry.termsDictIndexShift)) - 1L;
+
+ long blockLo = ordLo >>> TERMS_DICT_BLOCK_LZ4_SHIFT;
+ long blockHi = ordHi >>> TERMS_DICT_BLOCK_LZ4_SHIFT;
+
+ while (blockLo <= blockHi) {
+ final long blockMid = (blockLo + blockHi) >>> 1;
+ getFirstTermFromBlock(blockMid);
+ final int cmp = term.compareTo(text);
+ if (cmp <= 0) {
+ blockLo = blockMid + 1;
+ } else {
+ blockHi = blockMid - 1;
+ }
+ }
+
+ assert blockHi < 0 || getFirstTermFromBlock(blockHi).compareTo(text) <= 0;
+ assert blockHi == ((entry.termsDictSize - 1) >>> TERMS_DICT_BLOCK_LZ4_SHIFT)
+ || getFirstTermFromBlock(blockHi + 1).compareTo(text) > 0;
+
+ return blockHi;
+ }
+
+ @Override
+ public SeekStatus seekCeil(BytesRef text) throws IOException {
+ final long block = seekBlock(text);
+ if (block == -1) {
+ // before the first term, or empty terms dict
+ if (entry.termsDictSize == 0) {
+ ord = 0;
+ return SeekStatus.END;
+ } else {
+ seekExact(0L);
+ return SeekStatus.NOT_FOUND;
+ }
+ }
+ final long blockAddress = blockAddresses.get(block);
+ this.ord = block << TERMS_DICT_BLOCK_LZ4_SHIFT;
+ bytes.seek(blockAddress);
+ decompressBlock();
+
+ while (true) {
+ int cmp = term.compareTo(text);
+ if (cmp == 0) {
+ return SeekStatus.FOUND;
+ } else if (cmp > 0) {
+ return SeekStatus.NOT_FOUND;
+ }
+ if (next() == null) {
+ return SeekStatus.END;
+ }
+ }
+ }
+
+ private void decompressBlock() throws IOException {
+ // The first term is stored uncompressed, so there is no need to decompress the block when
+ // seekBlock only looks up the first term.
+ term.length = bytes.readVInt();
+ bytes.readBytes(term.bytes, 0, term.length);
+ long offset = bytes.getFilePointer();
+ if (offset < entry.termsDataLength - 1) {
+ // Avoid decompressing again if we are still reading the same block.
+ if (currentCompressedBlockStart != offset) {
+ blockBuffer.offset = term.length;
+ blockBuffer.length = bytes.readVInt();
+ // Decompress the rest of the current block, using the first term as the dictionary
+ System.arraycopy(term.bytes, 0, blockBuffer.bytes, 0, blockBuffer.offset);
+ LZ4.decompress(bytes, blockBuffer.length, blockBuffer.bytes, blockBuffer.offset);
+ currentCompressedBlockStart = offset;
+ currentCompressedBlockEnd = bytes.getFilePointer();
+ } else {
+ // Skip decompression, but re-seek to the end of the block.
+ bytes.seek(currentCompressedBlockEnd);
+ }
+
+ // Reset the buffer.
+ blockInput = new ByteArrayDataInput(blockBuffer.bytes, blockBuffer.offset, blockBuffer.length);
+ }
+ }
+
+ @Override
+ public BytesRef term() throws IOException {
+ return term;
+ }
+
+ @Override
+ public long ord() throws IOException {
+ return ord;
+ }
+
+ @Override
+ public long totalTermFreq() throws IOException {
+ return -1L;
+ }
+
+ @Override
+ public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ImpactsEnum impacts(int flags) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int docFreq() throws IOException {
+ throw new UnsupportedOperationException();
+ }
}
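Only the first term of each 64-term block is directly addressable, so seekExact jumps to the start of the containing block and then replays next() until it reaches the requested ord, i.e. at most 63 incremental decodes per lookup; seekCeil first narrows the block via the reverse index and a binary search over block-first terms, then does the same scan. A hypothetical usage sketch (the field name and the open leafReader are assumptions, not part of this change):

    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.SortedSetDocValues;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    SortedSetDocValues dv = DocValues.getSortedSet(leafReader, "host");
    TermsEnum terms = dv.termsEnum();
    terms.seekExact(42L);                      // seek to block 0 (ords 0..63), then scan forward to ord 42
    BytesRef term42 = terms.term();
    long ceilOrd = dv.lookupTerm(new BytesRef("web-01")); // negative result means the term is absent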
@Override
public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException {
SortedNumericEntry entry = sortedNumerics.get(field.name);
- return getSortedNumeric(entry);
+ return getSortedNumeric(entry, -1);
}
@Override
public SortedSetDocValues getSortedSet(FieldInfo field) throws IOException {
- throw new UnsupportedOperationException("Unsupported sorted set doc values for field [" + field.name + "]");
+ SortedSetEntry entry = sortedSets.get(field.name);
+ if (entry.singleValueEntry != null) {
+ return DocValues.singleton(getSorted(entry.singleValueEntry));
+ }
+
+ SortedNumericEntry ordsEntry = entry.ordsEntry;
+ final SortedNumericDocValues ords = getSortedNumeric(ordsEntry, entry.termsDictEntry.termsDictSize);
+ return new BaseSortedSetDocValues(entry, data) {
+
+ int i = 0;
+ int count = 0;
+ boolean set = false;
+
+ @Override
+ public long nextOrd() throws IOException {
+ if (set == false) {
+ set = true;
+ i = 0;
+ count = ords.docValueCount();
+ }
+ if (i++ == count) {
+ return NO_MORE_ORDS;
+ }
+ return ords.nextValue();
+ }
+
+ @Override
+ public int docValueCount() {
+ return ords.docValueCount();
+ }
+
+ @Override
+ public boolean advanceExact(int target) throws IOException {
+ set = false;
+ return ords.advanceExact(target);
+ }
+
+ @Override
+ public int docID() {
+ return ords.docID();
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ set = false;
+ return ords.nextDoc();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ set = false;
+ return ords.advance(target);
+ }
+
+ @Override
+ public long cost() {
+ return ords.cost();
+ }
+ };
}
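The multi-valued view above simply replays the ordinals stored in the backing sorted-numeric field, returning NO_MORE_ORDS once docValueCount() ordinals have been consumed for the current document. A minimal consumption sketch (field name assumed for illustration):

    SortedSetDocValues dv = DocValues.getSortedSet(leafReader, "tags");
    for (int doc = dv.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = dv.nextDoc()) {
        for (long ord = dv.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = dv.nextOrd()) {
            BytesRef value = dv.lookupOrd(ord);   // resolved through the shared terms dictionary
        }
    }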
@Override
@@ -147,9 +575,9 @@ private void readFields(IndexInput meta, FieldInfos infos) throws IOException {
} else if (type == ES87TSDBDocValuesFormat.BINARY) {
throw new CorruptIndexException("unsupported type: " + type, meta);
} else if (type == ES87TSDBDocValuesFormat.SORTED) {
- throw new CorruptIndexException("unsupported type: " + type, meta);
+ sorted.put(info.name, readSorted(meta));
} else if (type == ES87TSDBDocValuesFormat.SORTED_SET) {
- throw new CorruptIndexException("unsupported type: " + type, meta);
+ sortedSets.put(info.name, readSortedSet(meta));
} else if (type == ES87TSDBDocValuesFormat.SORTED_NUMERIC) {
sortedNumerics.put(info.name, readSortedNumeric(meta));
} else {
@@ -172,11 +600,15 @@ private static void readNumeric(IndexInput meta, NumericEntry entry) throws IOEx
entry.numValues = meta.readLong();
if (entry.numValues > 0) {
final int indexBlockShift = meta.readInt();
- entry.indexMeta = DirectMonotonicReader.loadMeta(
- meta,
- 1 + ((entry.numValues - 1) >>> ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SHIFT),
- indexBlockShift
- );
+ // Special case: -1 means there are no blocks, so there is no metadata to load.
+ // -1 is written when the cardinality of the field is exactly one.
+ if (indexBlockShift != -1) {
+ entry.indexMeta = DirectMonotonicReader.loadMeta(
+ meta,
+ 1 + ((entry.numValues - 1) >>> ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SHIFT),
+ indexBlockShift
+ );
+ }
entry.indexOffset = meta.readLong();
entry.indexLength = meta.readLong();
entry.valuesOffset = meta.readLong();
@@ -202,16 +634,152 @@ private static SortedNumericEntry readSortedNumeric(IndexInput meta, SortedNumer
return entry;
}
+ private SortedEntry readSorted(IndexInput meta) throws IOException {
+ SortedEntry entry = new SortedEntry();
+ entry.ordsEntry = new NumericEntry();
+ readNumeric(meta, entry.ordsEntry);
+ entry.termsDictEntry = new TermsDictEntry();
+ readTermDict(meta, entry.termsDictEntry);
+ return entry;
+ }
+
+ private SortedSetEntry readSortedSet(IndexInput meta) throws IOException {
+ SortedSetEntry entry = new SortedSetEntry();
+ byte multiValued = meta.readByte();
+ switch (multiValued) {
+ case 0: // singlevalued
+ entry.singleValueEntry = readSorted(meta);
+ return entry;
+ case 1: // multivalued
+ break;
+ default:
+ throw new CorruptIndexException("Invalid multiValued flag: " + multiValued, meta);
+ }
+ entry.ordsEntry = new SortedNumericEntry();
+ readSortedNumeric(meta, entry.ordsEntry);
+ entry.termsDictEntry = new TermsDictEntry();
+ readTermDict(meta, entry.termsDictEntry);
+ return entry;
+ }
+
+ private static void readTermDict(IndexInput meta, TermsDictEntry entry) throws IOException {
+ entry.termsDictSize = meta.readVLong();
+ final int blockShift = meta.readInt();
+ final long addressesSize = (entry.termsDictSize + (1L << TERMS_DICT_BLOCK_LZ4_SHIFT) - 1) >>> TERMS_DICT_BLOCK_LZ4_SHIFT;
+ entry.termsAddressesMeta = DirectMonotonicReader.loadMeta(meta, addressesSize, blockShift);
+ entry.maxTermLength = meta.readInt();
+ entry.maxBlockLength = meta.readInt();
+ entry.termsDataOffset = meta.readLong();
+ entry.termsDataLength = meta.readLong();
+ entry.termsAddressesOffset = meta.readLong();
+ entry.termsAddressesLength = meta.readLong();
+ entry.termsDictIndexShift = meta.readInt();
+ final long indexSize = (entry.termsDictSize + (1L << entry.termsDictIndexShift) - 1) >>> entry.termsDictIndexShift;
+ entry.termsIndexAddressesMeta = DirectMonotonicReader.loadMeta(meta, 1 + indexSize, blockShift);
+ entry.termsIndexOffset = meta.readLong();
+ entry.termsIndexLength = meta.readLong();
+ entry.termsIndexAddressesOffset = meta.readLong();
+ entry.termsIndexAddressesLength = meta.readLong();
+ }
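The reader-side metadata mirrors what addTermsDict and writeTermsIndex wrote: one address per 64-term LZ4 block and one reverse-index entry per 1024 terms, plus a trailing sentinel address. Illustrative arithmetic only, for a hypothetical dictionary of 1000 terms (the shifts are 6 and 10 in ES87TSDBDocValuesFormat):

    long termsDictSize = 1000;
    long numBlocks = (termsDictSize + (1L << TERMS_DICT_BLOCK_LZ4_SHIFT) - 1) >>> TERMS_DICT_BLOCK_LZ4_SHIFT;                      // 16
    long numIndexEntries = (termsDictSize + (1L << TERMS_DICT_REVERSE_INDEX_SHIFT) - 1) >>> TERMS_DICT_REVERSE_INDEX_SHIFT;        // 1
    // DirectMonotonicReader.loadMeta is then called with numBlocks and 1 + numIndexEntries entries respectively.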
+
private abstract static class NumericValues {
abstract long advance(long index) throws IOException;
}
- private NumericDocValues getNumeric(NumericEntry entry) throws IOException {
+ private NumericDocValues getNumeric(NumericEntry entry, long maxOrd) throws IOException {
if (entry.docsWithFieldOffset == -2) {
// empty
return DocValues.emptyNumeric();
}
+ if (maxOrd == 1) {
+ // Special case for maxOrd 1: no blocks were written, and ordinal 0 is the only value
+ if (entry.docsWithFieldOffset == -1) {
+ // Special case when all docs have a value
+ return new NumericDocValues() {
+
+ private final int maxDoc = ES87TSDBDocValuesProducer.this.maxDoc;
+ private int doc = -1;
+
+ @Override
+ public long longValue() {
+ // Only one ordinal!
+ return 0L;
+ }
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return advance(doc + 1);
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ if (target >= maxDoc) {
+ return doc = NO_MORE_DOCS;
+ }
+ return doc = target;
+ }
+
+ @Override
+ public boolean advanceExact(int target) {
+ doc = target;
+ return true;
+ }
+
+ @Override
+ public long cost() {
+ return maxDoc;
+ }
+ };
+ } else {
+ final IndexedDISI disi = new IndexedDISI(
+ data,
+ entry.docsWithFieldOffset,
+ entry.docsWithFieldLength,
+ entry.jumpTableEntryCount,
+ entry.denseRankPower,
+ entry.numValues
+ );
+ return new NumericDocValues() {
+
+ @Override
+ public int advance(int target) throws IOException {
+ return disi.advance(target);
+ }
+
+ @Override
+ public boolean advanceExact(int target) throws IOException {
+ return disi.advanceExact(target);
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return disi.nextDoc();
+ }
+
+ @Override
+ public int docID() {
+ return disi.docID();
+ }
+
+ @Override
+ public long cost() {
+ return disi.cost();
+ }
+
+ @Override
+ public long longValue() {
+ return 0L;
+ }
+ };
+ }
+ }
+
// NOTE: we could make this a bit simpler by reusing #getValues but this
// makes things slower.
@@ -219,6 +787,7 @@ private NumericDocValues getNumeric(NumericEntry entry) throws IOException {
final DirectMonotonicReader indexReader = DirectMonotonicReader.getInstance(entry.indexMeta, indexSlice);
final IndexInput valuesData = data.slice("values", entry.valuesOffset, entry.valuesLength);
+ final int bitsPerOrd = maxOrd >= 0 ? PackedInts.bitsRequired(maxOrd - 1) : -1;
if (entry.docsWithFieldOffset == -1) {
// dense
return new NumericDocValues() {
@@ -269,7 +838,11 @@ public long longValue() throws IOException {
valuesData.seek(indexReader.get(blockIndex));
}
currentBlockIndex = blockIndex;
- decoder.decode(valuesData, currentBlock);
+ if (maxOrd >= 0) {
+ decoder.decodeOrdinals(valuesData, currentBlock, bitsPerOrd);
+ } else {
+ decoder.decode(valuesData, currentBlock);
+ }
}
return currentBlock[blockInIndex];
}
@@ -325,7 +898,11 @@ public long longValue() throws IOException {
valuesData.seek(indexReader.get(blockIndex));
}
currentBlockIndex = blockIndex;
- decoder.decode(valuesData, currentBlock);
+ if (maxOrd >= 0) {
+ decoder.decodeOrdinals(valuesData, currentBlock, bitsPerOrd);
+ } else {
+ decoder.decode(valuesData, currentBlock);
+ }
}
return currentBlock[blockInIndex];
}
@@ -333,12 +910,13 @@ public long longValue() throws IOException {
}
}
- private NumericValues getValues(NumericEntry entry) throws IOException {
+ private NumericValues getValues(NumericEntry entry, final long maxOrd) throws IOException {
assert entry.numValues > 0;
final RandomAccessInput indexSlice = data.randomAccessSlice(entry.indexOffset, entry.indexLength);
final DirectMonotonicReader indexReader = DirectMonotonicReader.getInstance(entry.indexMeta, indexSlice);
final IndexInput valuesData = data.slice("values", entry.valuesOffset, entry.valuesLength);
+ final int bitsPerOrd = maxOrd >= 0 ? PackedInts.bitsRequired(maxOrd - 1) : -1;
return new NumericValues() {
private final ES87TSDBDocValuesEncoder decoder = new ES87TSDBDocValuesEncoder();
@@ -355,22 +933,26 @@ long advance(long index) throws IOException {
valuesData.seek(indexReader.get(blockIndex));
}
currentBlockIndex = blockIndex;
- decoder.decode(valuesData, currentBlock);
+ if (bitsPerOrd >= 0) {
+ decoder.decodeOrdinals(valuesData, currentBlock, bitsPerOrd);
+ } else {
+ decoder.decode(valuesData, currentBlock);
+ }
}
return currentBlock[blockInIndex];
}
};
}
- private SortedNumericDocValues getSortedNumeric(SortedNumericEntry entry) throws IOException {
+ private SortedNumericDocValues getSortedNumeric(SortedNumericEntry entry, long maxOrd) throws IOException {
if (entry.numValues == entry.numDocsWithField) {
- return DocValues.singleton(getNumeric(entry));
+ return DocValues.singleton(getNumeric(entry, maxOrd));
}
final RandomAccessInput addressesInput = data.randomAccessSlice(entry.addressesOffset, entry.addressesLength);
final LongValues addresses = DirectMonotonicReader.getInstance(entry.addressesMeta, addressesInput);
- final NumericValues values = getValues(entry);
+ final NumericValues values = getValues(entry, maxOrd);
if (entry.docsWithFieldOffset == -1) {
// dense
@@ -514,4 +1096,33 @@ private static class SortedNumericEntry extends NumericEntry {
long addressesLength;
}
+ private static class SortedEntry {
+ NumericEntry ordsEntry;
+ TermsDictEntry termsDictEntry;
+ }
+
+ private static class SortedSetEntry {
+ SortedEntry singleValueEntry;
+ SortedNumericEntry ordsEntry;
+ TermsDictEntry termsDictEntry;
+ }
+
+ private static class TermsDictEntry {
+ long termsDictSize;
+ DirectMonotonicReader.Meta termsAddressesMeta;
+ int maxTermLength;
+ long termsDataOffset;
+ long termsDataLength;
+ long termsAddressesOffset;
+ long termsAddressesLength;
+ int termsDictIndexShift;
+ DirectMonotonicReader.Meta termsIndexAddressesMeta;
+ long termsIndexOffset;
+ long termsIndexLength;
+ long termsIndexAddressesOffset;
+ long termsIndexAddressesLength;
+
+ int maxBlockLength;
+ }
+
}
diff --git a/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormat.java b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormat.java
new file mode 100644
index 0000000000000..1813601fc9477
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormat.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.index.codec.vectors;
+
+import org.apache.lucene.codecs.FlatVectorsFormat;
+import org.apache.lucene.codecs.FlatVectorsReader;
+import org.apache.lucene.codecs.FlatVectorsWriter;
+import org.apache.lucene.codecs.KnnFieldVectorsWriter;
+import org.apache.lucene.codecs.KnnVectorsFormat;
+import org.apache.lucene.codecs.KnnVectorsReader;
+import org.apache.lucene.codecs.KnnVectorsWriter;
+import org.apache.lucene.codecs.lucene99.Lucene99FlatVectorsFormat;
+import org.apache.lucene.index.ByteVectorValues;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FloatVectorValues;
+import org.apache.lucene.index.MergeState;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.index.Sorter;
+import org.apache.lucene.search.KnnCollector;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.hnsw.OrdinalTranslatedKnnCollector;
+import org.apache.lucene.util.hnsw.RandomVectorScorer;
+
+import java.io.IOException;
+
+public class ES813FlatVectorFormat extends KnnVectorsFormat {
+
+ static final String NAME = "ES813FlatVectorFormat";
+
+ private final FlatVectorsFormat format = new Lucene99FlatVectorsFormat();
+
+ /**
+ * Sole constructor
+ */
+ public ES813FlatVectorFormat() {
+ super(NAME);
+ }
+
+ @Override
+ public KnnVectorsWriter fieldsWriter(SegmentWriteState state) throws IOException {
+ return new ES813FlatVectorWriter(format.fieldsWriter(state));
+ }
+
+ @Override
+ public KnnVectorsReader fieldsReader(SegmentReadState state) throws IOException {
+ return new ES813FlatVectorReader(format.fieldsReader(state));
+ }
+
+ public static class ES813FlatVectorWriter extends KnnVectorsWriter {
+
+ private final FlatVectorsWriter writer;
+
+ public ES813FlatVectorWriter(FlatVectorsWriter writer) {
+ super();
+ this.writer = writer;
+ }
+
+ @Override
+ public KnnFieldVectorsWriter<?> addField(FieldInfo fieldInfo) throws IOException {
+ return writer.addField(fieldInfo, null);
+ }
+
+ @Override
+ public void flush(int maxDoc, Sorter.DocMap sortMap) throws IOException {
+ writer.flush(maxDoc, sortMap);
+ }
+
+ @Override
+ public void finish() throws IOException {
+ writer.finish();
+ }
+
+ @Override
+ public void close() throws IOException {
+ writer.close();
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return writer.ramBytesUsed();
+ }
+
+ @Override
+ public void mergeOneField(FieldInfo fieldInfo, MergeState mergeState) throws IOException {
+ writer.mergeOneField(fieldInfo, mergeState);
+ }
+ }
+
+ public static class ES813FlatVectorReader extends KnnVectorsReader {
+
+ private final FlatVectorsReader reader;
+
+ public ES813FlatVectorReader(FlatVectorsReader reader) {
+ super();
+ this.reader = reader;
+ }
+
+ @Override
+ public void checkIntegrity() throws IOException {
+ reader.checkIntegrity();
+ }
+
+ @Override
+ public FloatVectorValues getFloatVectorValues(String field) throws IOException {
+ return reader.getFloatVectorValues(field);
+ }
+
+ @Override
+ public ByteVectorValues getByteVectorValues(String field) throws IOException {
+ return reader.getByteVectorValues(field);
+ }
+
+ @Override
+ public void search(String field, float[] target, KnnCollector knnCollector, Bits acceptDocs) throws IOException {
+ collectAllMatchingDocs(knnCollector, acceptDocs, reader.getRandomVectorScorer(field, target));
+ }
+
+ private void collectAllMatchingDocs(KnnCollector knnCollector, Bits acceptDocs, RandomVectorScorer scorer) throws IOException {
+ OrdinalTranslatedKnnCollector collector = new OrdinalTranslatedKnnCollector(knnCollector, scorer::ordToDoc);
+ Bits acceptedOrds = scorer.getAcceptOrds(acceptDocs);
+ for (int i = 0; i < scorer.maxOrd(); i++) {
+ if (acceptedOrds == null || acceptedOrds.get(i)) {
+ collector.collect(i, scorer.score(i));
+ collector.incVisitedCount(1);
+ }
+ }
+ assert collector.earlyTerminated() == false;
+ }
+
+ @Override
+ public void search(String field, byte[] target, KnnCollector knnCollector, Bits acceptDocs) throws IOException {
+ collectAllMatchingDocs(knnCollector, acceptDocs, reader.getRandomVectorScorer(field, target));
+ }
+
+ @Override
+ public void close() throws IOException {
+ reader.close();
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return reader.ramBytesUsed();
+ }
+ }
+}
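ES813FlatVectorFormat stores vectors with the Lucene99 flat writer and answers kNN queries by scoring every vector (there is no HNSW graph), which is why collectAllMatchingDocs asserts the collector never terminates early. A hypothetical standalone-Lucene wiring, for illustration only; in Elasticsearch the format is selected through the dense_vector index_options instead:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.codecs.KnnVectorsFormat;
    import org.apache.lucene.codecs.lucene99.Lucene99Codec;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.elasticsearch.index.codec.vectors.ES813FlatVectorFormat;

    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
    config.setCodec(new Lucene99Codec() {
        @Override
        public KnnVectorsFormat getKnnVectorsFormatForField(String field) {
            return new ES813FlatVectorFormat(); // or new ES813Int8FlatVectorFormat(null) for quantized vectors
        }
    });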
diff --git a/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813Int8FlatVectorFormat.java b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813Int8FlatVectorFormat.java
new file mode 100644
index 0000000000000..5764f31d018c4
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813Int8FlatVectorFormat.java
@@ -0,0 +1,158 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.index.codec.vectors;
+
+import org.apache.lucene.codecs.FlatVectorsFormat;
+import org.apache.lucene.codecs.FlatVectorsReader;
+import org.apache.lucene.codecs.FlatVectorsWriter;
+import org.apache.lucene.codecs.KnnFieldVectorsWriter;
+import org.apache.lucene.codecs.KnnVectorsFormat;
+import org.apache.lucene.codecs.KnnVectorsReader;
+import org.apache.lucene.codecs.KnnVectorsWriter;
+import org.apache.lucene.codecs.lucene99.Lucene99ScalarQuantizedVectorsFormat;
+import org.apache.lucene.index.ByteVectorValues;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FloatVectorValues;
+import org.apache.lucene.index.MergeState;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.index.Sorter;
+import org.apache.lucene.search.KnnCollector;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.hnsw.OrdinalTranslatedKnnCollector;
+import org.apache.lucene.util.hnsw.RandomVectorScorer;
+
+import java.io.IOException;
+
+public class ES813Int8FlatVectorFormat extends KnnVectorsFormat {
+
+ static final String NAME = "ES813Int8FlatVectorFormat";
+
+ private final FlatVectorsFormat format;
+
+ public ES813Int8FlatVectorFormat() {
+ this(null);
+ }
+
+ /**
+ * Sole constructor
+ */
+ public ES813Int8FlatVectorFormat(Float confidenceInterval) {
+ super(NAME);
+ this.format = new Lucene99ScalarQuantizedVectorsFormat(confidenceInterval);
+ }
+
+ @Override
+ public KnnVectorsWriter fieldsWriter(SegmentWriteState state) throws IOException {
+ return new ES813FlatVectorWriter(format.fieldsWriter(state));
+ }
+
+ @Override
+ public KnnVectorsReader fieldsReader(SegmentReadState state) throws IOException {
+ return new ES813FlatVectorReader(format.fieldsReader(state));
+ }
+
+ public static class ES813FlatVectorWriter extends KnnVectorsWriter {
+
+ private final FlatVectorsWriter writer;
+
+ public ES813FlatVectorWriter(FlatVectorsWriter writer) {
+ super();
+ this.writer = writer;
+ }
+
+ @Override
+ public KnnFieldVectorsWriter<?> addField(FieldInfo fieldInfo) throws IOException {
+ return writer.addField(fieldInfo, null);
+ }
+
+ @Override
+ public void flush(int maxDoc, Sorter.DocMap sortMap) throws IOException {
+ writer.flush(maxDoc, sortMap);
+ }
+
+ @Override
+ public void finish() throws IOException {
+ writer.finish();
+ }
+
+ @Override
+ public void close() throws IOException {
+ writer.close();
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return writer.ramBytesUsed();
+ }
+
+ @Override
+ public void mergeOneField(FieldInfo fieldInfo, MergeState mergeState) throws IOException {
+ writer.mergeOneField(fieldInfo, mergeState);
+ }
+ }
+
+ public static class ES813FlatVectorReader extends KnnVectorsReader {
+
+ private final FlatVectorsReader reader;
+
+ public ES813FlatVectorReader(FlatVectorsReader reader) {
+ super();
+ this.reader = reader;
+ }
+
+ @Override
+ public void checkIntegrity() throws IOException {
+ reader.checkIntegrity();
+ }
+
+ @Override
+ public FloatVectorValues getFloatVectorValues(String field) throws IOException {
+ return reader.getFloatVectorValues(field);
+ }
+
+ @Override
+ public ByteVectorValues getByteVectorValues(String field) throws IOException {
+ return reader.getByteVectorValues(field);
+ }
+
+ @Override
+ public void search(String field, float[] target, KnnCollector knnCollector, Bits acceptDocs) throws IOException {
+ collectAllMatchingDocs(knnCollector, acceptDocs, reader.getRandomVectorScorer(field, target));
+ }
+
+ private void collectAllMatchingDocs(KnnCollector knnCollector, Bits acceptDocs, RandomVectorScorer scorer) throws IOException {
+ OrdinalTranslatedKnnCollector collector = new OrdinalTranslatedKnnCollector(knnCollector, scorer::ordToDoc);
+ Bits acceptedOrds = scorer.getAcceptOrds(acceptDocs);
+ for (int i = 0; i < scorer.maxOrd(); i++) {
+ if (acceptedOrds == null || acceptedOrds.get(i)) {
+ collector.collect(i, scorer.score(i));
+ collector.incVisitedCount(1);
+ }
+ }
+ assert collector.earlyTerminated() == false;
+ }
+
+ @Override
+ public void search(String field, byte[] target, KnnCollector knnCollector, Bits acceptDocs) throws IOException {
+ collectAllMatchingDocs(knnCollector, acceptDocs, reader.getRandomVectorScorer(field, target));
+ }
+
+ @Override
+ public void close() throws IOException {
+ reader.close();
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return reader.ramBytesUsed();
+ }
+
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java
index 0a669fb0ade8a..66c5de61bcd92 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java
@@ -335,7 +335,7 @@ public final boolean addDynamicMapper(Mapper mapper) {
if (mappingLookup.getMapper(mapper.name()) == null
&& mappingLookup.objectMappers().containsKey(mapper.name()) == false
&& dynamicMappers.containsKey(mapper.name()) == false) {
- int mapperSize = mapper.mapperSize();
+ int mapperSize = mapper.getTotalFieldsCount();
int additionalFieldsToAdd = getNewFieldsSize() + mapperSize;
if (indexSettings().isIgnoreDynamicFieldsBeyondLimit()) {
if (mappingLookup.exceedsLimit(indexSettings().getMappingTotalFieldsLimit(), additionalFieldsToAdd)) {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java
index c24ff9bb9c277..97d1b9368a6c9 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java
@@ -113,6 +113,11 @@ public void validate(MappingLookup mappers) {
}
}
+ @Override
+ public int getTotalFieldsCount() {
+ return 1;
+ }
+
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, MappingParserContext parserContext)
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
index 9ed23f61bf0ea..75d9fed2a4d4b 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
@@ -51,6 +51,7 @@
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
+import java.util.stream.Stream;
import static org.elasticsearch.core.Strings.format;
@@ -428,6 +429,11 @@ protected void doXContentBody(XContentBuilder builder, Params params) throws IOE
protected abstract String contentType();
+ @Override
+ public int getTotalFieldsCount() {
+ return 1 + Stream.of(multiFields.mappers).mapToInt(FieldMapper::getTotalFieldsCount).sum();
+ }
+
public Map<String, NamedAnalyzer> indexAnalyzers() {
return Map.of();
}
@@ -455,7 +461,7 @@ private void add(FieldMapper mapper) {
private void update(FieldMapper toMerge, MapperMergeContext context) {
if (mapperBuilders.containsKey(toMerge.simpleName()) == false) {
- if (context.decrementFieldBudgetIfPossible(toMerge.mapperSize())) {
+ if (context.decrementFieldBudgetIfPossible(toMerge.getTotalFieldsCount())) {
add(toMerge);
}
} else {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java
index ca15248c037bc..397f99f63030c 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java
@@ -137,16 +137,8 @@ public static FieldType freezeAndDeduplicateFieldType(FieldType fieldType) {
}
/**
- * Returns the size this mapper counts against the {@linkplain MapperService#INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING field limit}.
- *
- * Needs to be in sync with {@link MappingLookup#getTotalFieldsCount()}.
+ * The total number of fields as defined in the mapping.
+ * Defines how this mapper counts towards {@link MapperService#INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING}.
*/
- public int mapperSize() {
- int size = 1;
- for (Mapper mapper : this) {
- size += mapper.mapperSize();
- }
- return size;
- }
-
+ public abstract int getTotalFieldsCount();
}
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java
index 684853f9076ef..96eb0211a4a0c 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java
@@ -55,6 +55,7 @@ private CacheKey() {}
private final List<FieldMapper> indexTimeScriptMappers;
private final Mapping mapping;
private final Set<String> completionFields;
+ private final int totalFieldsCount;
/**
* Creates a new {@link MappingLookup} instance by parsing the provided mapping and extracting its field definitions.
@@ -127,6 +128,7 @@ private MappingLookup(
Collection<ObjectMapper> objectMappers,
Collection<FieldAliasMapper> aliasMappers
) {
+ this.totalFieldsCount = mapping.getRoot().getTotalFieldsCount();
this.mapping = mapping;
Map fieldMappers = new HashMap<>();
Map objects = new HashMap<>();
@@ -223,6 +225,14 @@ FieldTypeLookup fieldTypesLookup() {
* Returns the total number of fields defined in the mappings, including field mappers, object mappers as well as runtime fields.
*/
public long getTotalFieldsCount() {
+ return totalFieldsCount;
+ }
+
+ /**
+ * Returns the total number of mappers defined in the mappings, including field mappers and their sub-fields
+ * (which are not explicitly defined in the mappings), multi-fields, object mappers, runtime fields and metadata field mappers.
+ */
+ public long getTotalMapperCount() {
return fieldMappers.size() + objectMappers.size() + runtimeFieldMappersCount;
}
@@ -286,7 +296,7 @@ boolean exceedsLimit(long limit, int additionalFieldsToAdd) {
}
long remainingFieldsUntilLimit(long mappingTotalFieldsLimit) {
- return mappingTotalFieldsLimit - getTotalFieldsCount() + mapping.getSortedMetadataMappers().length;
+ return mappingTotalFieldsLimit - totalFieldsCount;
}
private void checkDimensionFieldLimit(long limit) {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
index 1ed9713d73e75..0bce02564ef34 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
@@ -182,6 +182,11 @@ public ObjectMapper build(MapperBuilderContext context) {
}
}
+ @Override
+ public int getTotalFieldsCount() {
+ return 1 + mappers.values().stream().mapToInt(Mapper::getTotalFieldsCount).sum();
+ }
+
public static class TypeParser implements Mapper.TypeParser {
@Override
public boolean supportsVersion(IndexVersion indexCreatedVersion) {
@@ -295,7 +300,10 @@ protected static void parseProperties(
}
}
- if (objBuilder.subobjects.value() == false && type.equals(ObjectMapper.CONTENT_TYPE)) {
+ if (objBuilder.subobjects.value() == false
+ && (type.equals(ObjectMapper.CONTENT_TYPE)
+ || type.equals(NestedObjectMapper.CONTENT_TYPE)
+ || type.equals(PassThroughObjectMapper.CONTENT_TYPE))) {
throw new MapperParsingException(
"Tried to add subobject ["
+ fieldName
@@ -304,24 +312,6 @@ protected static void parseProperties(
+ "] which does not support subobjects"
);
}
- if (objBuilder.subobjects.value() == false && type.equals(NestedObjectMapper.CONTENT_TYPE)) {
- throw new MapperParsingException(
- "Tried to add nested object ["
- + fieldName
- + "] to object ["
- + objBuilder.name()
- + "] which does not support subobjects"
- );
- }
- if (type.equals(PassThroughObjectMapper.CONTENT_TYPE) && objBuilder instanceof RootObjectMapper.Builder == false) {
- throw new MapperParsingException(
- "Tried to add passthrough subobject ["
- + fieldName
- + "] to object ["
- + objBuilder.name()
- + "], passthrough is not supported as a subobject"
- );
- }
Mapper.TypeParser typeParser = parserContext.typeParser(type);
if (typeParser == null) {
throw new MapperParsingException("No handler for type [" + type + "] declared on field [" + fieldName + "]");
@@ -562,7 +552,7 @@ private static Map buildMergedMappers(
Mapper mergeIntoMapper = mergedMappers.get(mergeWithMapper.simpleName());
Mapper merged = null;
if (mergeIntoMapper == null) {
- if (objectMergeContext.decrementFieldBudgetIfPossible(mergeWithMapper.mapperSize())) {
+ if (objectMergeContext.decrementFieldBudgetIfPossible(mergeWithMapper.getTotalFieldsCount())) {
merged = mergeWithMapper;
} else if (mergeWithMapper instanceof ObjectMapper om) {
merged = truncateObjectMapper(reason, objectMergeContext, om);
@@ -596,7 +586,7 @@ private static ObjectMapper truncateObjectMapper(MergeReason reason, MapperMerge
// there's not enough capacity for the whole object mapper,
// so we're just trying to add the shallow object, without its sub-fields
ObjectMapper shallowObjectMapper = objectMapper.withoutMappers();
- if (context.decrementFieldBudgetIfPossible(shallowObjectMapper.mapperSize())) {
+ if (context.decrementFieldBudgetIfPossible(shallowObjectMapper.getTotalFieldsCount())) {
// now trying to add the sub-fields one by one via a merge, until we hit the limit
return shallowObjectMapper.merge(objectMapper, reason, context);
}
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java
index 7688b217ab7fc..b49c9328fcc79 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java
@@ -57,6 +57,7 @@ public PassThroughObjectMapper.Builder setContainsDimensions() {
public PassThroughObjectMapper build(MapperBuilderContext context) {
return new PassThroughObjectMapper(
name,
+ context.buildFullName(name),
enabled,
dynamic,
buildMappers(context.createChildContext(name)),
@@ -70,19 +71,20 @@ public PassThroughObjectMapper build(MapperBuilderContext context) {
PassThroughObjectMapper(
String name,
+ String fullPath,
Explicit enabled,
Dynamic dynamic,
Map mappers,
Explicit timeSeriesDimensionSubFields
) {
// Subobjects are not currently supported.
- super(name, name, enabled, Explicit.IMPLICIT_FALSE, dynamic, mappers);
+ super(name, fullPath, enabled, Explicit.IMPLICIT_FALSE, dynamic, mappers);
this.timeSeriesDimensionSubFields = timeSeriesDimensionSubFields;
}
@Override
PassThroughObjectMapper withoutMappers() {
- return new PassThroughObjectMapper(simpleName(), enabled, dynamic, Map.of(), timeSeriesDimensionSubFields);
+ return new PassThroughObjectMapper(simpleName(), fullPath(), enabled, dynamic, Map.of(), timeSeriesDimensionSubFields);
}
public boolean containsDimensions() {
@@ -91,7 +93,7 @@ public boolean containsDimensions() {
@Override
public PassThroughObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) {
- PassThroughObjectMapper.Builder builder = new PassThroughObjectMapper.Builder(name());
+ PassThroughObjectMapper.Builder builder = new PassThroughObjectMapper.Builder(simpleName());
builder.enabled = enabled;
builder.dynamic = dynamic;
builder.timeSeriesDimensionSubFields = timeSeriesDimensionSubFields;
@@ -108,6 +110,7 @@ public PassThroughObjectMapper merge(ObjectMapper mergeWith, MergeReason reason,
return new PassThroughObjectMapper(
simpleName(),
+ fullPath(),
mergeResult.enabled(),
mergeResult.dynamic(),
mergeResult.mappers(),
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
index 86bdb2aa2bba7..2fe8c49df2175 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
@@ -45,6 +45,7 @@
public class RootObjectMapper extends ObjectMapper {
private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RootObjectMapper.class);
+ private static final int MAX_NESTING_LEVEL_FOR_PASS_THROUGH_OBJECTS = 20;
/**
* Parameter used when serializing {@link RootObjectMapper} and request that the runtime section is skipped.
@@ -111,7 +112,11 @@ public RootObjectMapper.Builder addRuntimeFields(Map runti
@Override
public RootObjectMapper build(MapperBuilderContext context) {
Map<String, Mapper> mappers = buildMappers(context);
- mappers.putAll(getAliasMappers(mappers, context));
+
+ Map<String, Mapper> aliasMappers = new HashMap<>();
+ getAliasMappers(mappers, aliasMappers, context, 0);
+ mappers.putAll(aliasMappers);
+
return new RootObjectMapper(
name,
enabled,
@@ -126,8 +131,11 @@ public RootObjectMapper build(MapperBuilderContext context) {
);
}
- Map<String, Mapper> getAliasMappers(Map<String, Mapper> mappers, MapperBuilderContext context) {
- Map<String, Mapper> aliasMappers = new HashMap<>();
+ void getAliasMappers(Map<String, Mapper> mappers, Map<String, Mapper> aliasMappers, MapperBuilderContext context, int level) {
+ if (level >= MAX_NESTING_LEVEL_FOR_PASS_THROUGH_OBJECTS) {
+ logger.warn("Exceeded maximum nesting level for searching for pass-through object fields within object fields.");
+ return;
+ }
for (Mapper mapper : mappers.values()) {
// Create aliases for all fields in child passthrough mappers and place them under the root object.
if (mapper instanceof PassThroughObjectMapper passthroughMapper) {
@@ -154,9 +162,11 @@ Map getAliasMappers(Map mappers, MapperBuilderCo
}
}
}
+ } else if (mapper instanceof ObjectMapper objectMapper) {
+ // Call recursively to check child fields. The level guards against long recursive call sequences.
+ getAliasMappers(objectMapper.mappers, aliasMappers, context, level + 1);
}
}
- return aliasMappers;
}
}
@@ -566,11 +576,7 @@ private static boolean processField(
}
@Override
- public int mapperSize() {
- int size = runtimeFields().size();
- for (Mapper mapper : this) {
- size += mapper.mapperSize();
- }
- return size;
+ public int getTotalFieldsCount() {
+ return mappers.values().stream().mapToInt(Mapper::getTotalFieldsCount).sum() + runtimeFields.size();
}
}
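
Note on the RootObjectMapper change above: alias creation for pass-through fields now recurses into child object mappers, with a hard depth limit of 20 so a pathological mapping cannot trigger unbounded recursion. A minimal, self-contained sketch of the same bounded-recursion pattern (toy types, not the Elasticsearch classes):

```java
import java.util.HashMap;
import java.util.Map;

class BoundedWalk {
    private static final int MAX_DEPTH = 20; // mirrors MAX_NESTING_LEVEL_FOR_PASS_THROUGH_OBJECTS

    // Collect leaf entries, refusing to descend more than MAX_DEPTH levels.
    static void collect(Map<String, Object> node, Map<String, Object> out, int level) {
        if (level >= MAX_DEPTH) {
            return; // analogous to the warn-and-return guard in getAliasMappers
        }
        for (Map.Entry<String, Object> entry : node.entrySet()) {
            if (entry.getValue() instanceof Map<?, ?> child) {
                @SuppressWarnings("unchecked")
                Map<String, Object> typed = (Map<String, Object>) child;
                collect(typed, out, level + 1);
            } else {
                out.put(entry.getKey(), entry.getValue());
            }
        }
    }

    public static void main(String[] args) {
        Map<String, Object> out = new HashMap<>();
        collect(Map.of("a", 1, "nested", Map.of("b", 2)), out, 0);
        System.out.println(out); // {a=1, b=2} (order may vary)
    }
}
```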
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java
index a9a31ba585177..d36ca9e0b25c1 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java
@@ -46,6 +46,8 @@
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
+import org.elasticsearch.index.codec.vectors.ES813FlatVectorFormat;
+import org.elasticsearch.index.codec.vectors.ES813Int8FlatVectorFormat;
import org.elasticsearch.index.fielddata.FieldDataContext;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.mapper.ArraySourceValueFetcher;
@@ -842,6 +844,25 @@ public IndexOptions parseIndexOptions(String fieldName, Map indexOpti
MappingParser.checkNoRemainingFields(fieldName, indexOptionsMap);
return new Int8HnswIndexOptions(m, efConstruction, confidenceInterval);
}
+ },
+ FLAT("flat") {
+ @Override
+ public IndexOptions parseIndexOptions(String fieldName, Map<String, ?> indexOptionsMap) {
+ MappingParser.checkNoRemainingFields(fieldName, indexOptionsMap);
+ return new FlatIndexOptions();
+ }
+ },
+ INT8_FLAT("int8_flat") {
+ @Override
+ public IndexOptions parseIndexOptions(String fieldName, Map<String, ?> indexOptionsMap) {
+ Object confidenceIntervalNode = indexOptionsMap.remove("confidence_interval");
+ Float confidenceInterval = null;
+ if (confidenceIntervalNode != null) {
+ confidenceInterval = (float) XContentMapValues.nodeDoubleValue(confidenceIntervalNode);
+ }
+ MappingParser.checkNoRemainingFields(fieldName, indexOptionsMap);
+ return new Int8FlatIndexOption(confidenceInterval);
+ }
};
static Optional<VectorIndexType> fromString(String type) {
@@ -857,6 +878,80 @@ static Optional fromString(String type) {
abstract IndexOptions parseIndexOptions(String fieldName, Map<String, ?> indexOptionsMap);
}
+ private static class Int8FlatIndexOption extends IndexOptions {
+ private final Float confidenceInterval;
+
+ Int8FlatIndexOption(Float confidenceInterval) {
+ super("int8_flat");
+ this.confidenceInterval = confidenceInterval;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field("type", type);
+ if (confidenceInterval != null) {
+ builder.field("confidence_interval", confidenceInterval);
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ KnnVectorsFormat getVectorsFormat() {
+ return new ES813Int8FlatVectorFormat(confidenceInterval);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Int8FlatIndexOption that = (Int8FlatIndexOption) o;
+ return Objects.equals(confidenceInterval, that.confidenceInterval);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(confidenceInterval);
+ }
+
+ @Override
+ boolean supportsElementType(ElementType elementType) {
+ return elementType != ElementType.BYTE;
+ }
+ }
+
+ private static class FlatIndexOptions extends IndexOptions {
+
+ FlatIndexOptions() {
+ super("flat");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field("type", type);
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ KnnVectorsFormat getVectorsFormat() {
+ return new ES813FlatVectorFormat();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ return o != null && getClass() == o.getClass();
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(type);
+ }
+ }
+
private static class Int8HnswIndexOptions extends IndexOptions {
private final int m;
private final int efConstruction;
@@ -1186,7 +1281,6 @@ && isNotUnitVector(squaredMagnitude)) {
case FLOAT -> parentFilter != null
? new ESDiversifyingChildrenFloatKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter)
: new ESKnnFloatVectorQuery(name(), queryVector, numCands, filter);
-
};
if (similarityThreshold != null) {
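
For the two index option types added above, a hedged example of a mapping that would exercise the int8_flat parser (the field name, dims, and similarity are illustrative; "flat" would look the same minus confidence_interval):

```java
// Hypothetical dense_vector mapping using the new "int8_flat" index options.
String mapping = """
    {
      "properties": {
        "embedding": {
          "type": "dense_vector",
          "dims": 4,
          "index": true,
          "similarity": "cosine",
          "index_options": {
            "type": "int8_flat",
            "confidence_interval": 0.95
          }
        }
      }
    }
    """;
```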
diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
index 63cd598caa784..60769fc8fda77 100644
--- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
@@ -280,14 +280,14 @@ public SimpleQueryStringBuilder flags(SimpleQueryStringFlag... flags) {
return this;
}
- /** For testing and serialisation only. */
- SimpleQueryStringBuilder flags(int flags) {
+ /** For testing, builder instance copy, and serialisation only. */
+ public SimpleQueryStringBuilder flags(int flags) {
this.flags = flags;
return this;
}
- /** For testing only: Return the flags set for this query. */
- int flags() {
+ /** For testing and instance copy only: Return the flags set for this query. */
+ public int flags() {
return this.flags;
}
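
Making flags(int) and flags() public enables a direct builder-to-builder copy of the resolved flag bitmask; a small usage sketch (query text and flag choice are illustrative):

```java
SimpleQueryStringBuilder original = new SimpleQueryStringBuilder("quick brown fox");
original.flags(SimpleQueryStringFlag.AND, SimpleQueryStringFlag.PREFIX);

// Copy the already-resolved bitmask onto a second builder instance.
SimpleQueryStringBuilder copy = new SimpleQueryStringBuilder(original.value());
copy.flags(original.flags());
```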
diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
index 6e8a20b1ad290..cdf55ab187dc6 100644
--- a/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
@@ -41,7 +41,6 @@
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
-import java.util.function.IntFunction;
import java.util.function.Supplier;
import java.util.stream.IntStream;
@@ -50,10 +49,9 @@
*/
public class TermsQueryBuilder extends AbstractQueryBuilder<TermsQueryBuilder> {
public static final String NAME = "terms";
- private static final TransportVersion VERSION_STORE_VALUES_AS_BYTES_REFERENCE = TransportVersions.V_7_12_0;
private final String fieldName;
- private final Values values;
+ private final BinaryValues values;
private final TermsLookup termsLookup;
private final Supplier<List<?>> supplier;
@@ -147,7 +145,7 @@ public TermsQueryBuilder(String fieldName, Object... values) {
* @param fieldName The field name
* @param values The terms
*/
- public TermsQueryBuilder(String fieldName, Iterable<?> values) {
+ public TermsQueryBuilder(String fieldName, Collection<?> values) {
if (Strings.isEmpty(fieldName)) {
throw new IllegalArgumentException("field name cannot be null.");
}
@@ -155,8 +153,8 @@ public TermsQueryBuilder(String fieldName, Iterable> values) {
throw new IllegalArgumentException("No value specified for terms query");
}
this.fieldName = fieldName;
- if (values instanceof Values) {
- this.values = (Values) values;
+ if (values instanceof BinaryValues binaryValues) {
+ this.values = binaryValues;
} else {
this.values = new BinaryValues(values, true);
}
@@ -178,7 +176,7 @@ public TermsQueryBuilder(StreamInput in) throws IOException {
super(in);
this.fieldName = in.readString();
this.termsLookup = in.readOptionalWriteable(TermsLookup::new);
- this.values = Values.readFrom(in);
+ this.values = in.readOptionalWriteable(BinaryValues::new);
this.supplier = null;
}
@@ -189,14 +187,14 @@ protected void doWriteTo(StreamOutput out) throws IOException {
}
out.writeString(fieldName);
out.writeOptionalWriteable(termsLookup);
- Values.writeTo(out, values);
+ out.writeOptionalWriteable(values);
}
public String fieldName() {
return this.fieldName;
}
- public Values getValues() {
+ public BinaryValues getValues() {
return values;
}
@@ -412,116 +410,83 @@ protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throw
return this;
}
+ /**
+ * Store terms as a {@link BytesReference}.
+ *
+ * When users send a query containing a lot of terms, a {@link BytesReference} helps reduce
+ * GC pressure and the cost of {@link #doWriteTo}, which can be slow for many terms.
+ */
@SuppressWarnings("rawtypes")
- private abstract static class Values extends AbstractCollection implements Writeable {
+ public static final class BinaryValues extends AbstractCollection implements Writeable {
- private static Values readFrom(StreamInput in) throws IOException {
- if (in.getTransportVersion().onOrAfter(VERSION_STORE_VALUES_AS_BYTES_REFERENCE)) {
- return in.readOptionalWriteable(BinaryValues::new);
- } else {
- List<?> list = (List<?>) in.readGenericValue();
- return list == null ? null : new ListValues(list);
- }
+ private final BytesReference valueRef;
+ private final int size;
+
+ private BinaryValues(StreamInput in) throws IOException {
+ this(in.readBytesReference());
}
- private static void writeTo(StreamOutput out, Values values) throws IOException {
- if (out.getTransportVersion().onOrAfter(VERSION_STORE_VALUES_AS_BYTES_REFERENCE)) {
- out.writeOptionalWriteable(values);
- } else {
- if (values == null) {
- out.writeGenericValue(null);
- } else {
- values.writeTo(out);
- }
- }
+ private BinaryValues(Collection<?> values, boolean convert) {
+ this(serialize(values, convert));
}
- protected static BytesReference serialize(Iterable<?> values, boolean convert) {
- List<?> list;
- if (values instanceof List<?>) {
- list = (List<?>) values;
- } else {
- ArrayList arrayList = new ArrayList<>();
- for (Object o : values) {
- arrayList.add(o);
- }
- list = arrayList;
- }
+ private static BytesReference serialize(Collection<?> values, boolean convert) {
try (BytesStreamOutput output = new BytesStreamOutput()) {
+ output.writeByte(StreamOutput.GENERIC_LIST_HEADER);
+ output.writeVInt(values.size());
if (convert) {
- list = list.stream().map(AbstractQueryBuilder::maybeConvertToBytesRef).toList();
+ for (Object value : values) {
+ output.writeGenericValue(AbstractQueryBuilder.maybeConvertToBytesRef(value));
+ }
+ } else {
+ for (Object value : values) {
+ output.writeGenericValue(value);
+ }
}
- output.writeGenericValue(list);
return output.bytes();
} catch (IOException e) {
throw new UncheckedIOException("failed to serialize TermsQueryBuilder", e);
}
}
- @Override
- public final boolean add(Object o) {
- throw new UnsupportedOperationException();
+ private BinaryValues(BytesReference bytesRef) {
+ this.valueRef = bytesRef;
+ try (StreamInput in = valueRef.streamInput()) {
+ size = consumerHeadersAndGetListSize(in);
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
}
@Override
- public final boolean remove(Object o) {
+ public boolean remove(Object o) {
throw new UnsupportedOperationException();
}
@Override
- public final boolean containsAll(Collection c) {
+ public boolean containsAll(Collection c) {
throw new UnsupportedOperationException();
}
@Override
- public final boolean addAll(Collection c) {
+ public boolean addAll(Collection c) {
throw new UnsupportedOperationException();
}
@Override
- public final boolean removeAll(Collection c) {
+ public boolean removeAll(Collection c) {
throw new UnsupportedOperationException();
}
@Override
- public final boolean retainAll(Collection c) {
+ public boolean retainAll(Collection c) {
throw new UnsupportedOperationException();
}
@Override
- public final void clear() {
+ public void clear() {
throw new UnsupportedOperationException();
}
- }
-
- /**
- * Store terms as a {@link BytesReference}.
- *
- * When users send a query contain a lot of terms, A {@link BytesReference} can help
- * gc and reduce the cost of {@link #doWriteTo}, which can be slow for lots of terms.
- */
- @SuppressWarnings("rawtypes")
- private static class BinaryValues extends Values {
-
- private final BytesReference valueRef;
- private final int size;
-
- private BinaryValues(StreamInput in) throws IOException {
- this(in.readBytesReference());
- }
-
- private BinaryValues(Iterable<?> values, boolean convert) {
- this(serialize(values, convert));
- }
-
- private BinaryValues(BytesReference bytesRef) {
- this.valueRef = bytesRef;
- try (StreamInput in = valueRef.streamInput()) {
- size = consumerHeadersAndGetListSize(in);
- } catch (IOException e) {
- throw new UncheckedIOException(e);
- }
- }
@Override
public int size() {
@@ -562,11 +527,7 @@ public Object next() {
@Override
public void writeTo(StreamOutput out) throws IOException {
- if (out.getTransportVersion().onOrAfter(VERSION_STORE_VALUES_AS_BYTES_REFERENCE)) {
- out.writeBytesReference(valueRef);
- } else {
- valueRef.writeTo(out);
- }
+ out.writeBytesReference(valueRef);
}
@Override
@@ -584,87 +545,11 @@ public int hashCode() {
private static int consumerHeadersAndGetListSize(StreamInput in) throws IOException {
byte genericSign = in.readByte();
- assert genericSign == 7;
+ assert genericSign == StreamOutput.GENERIC_LIST_HEADER;
return in.readVInt();
}
}
- /**
- * This is for lower version requests compatible.
- *
- * If we do not keep this, it could be expensive when receiving a request from
- * lower version.
- * We have to read the value list by {@link StreamInput#readGenericValue},
- * serialize it into {@link BytesReference}, and then deserialize it again when
- * {@link #doToQuery} called}.
- *
- *
- * TODO: remove in 9.0.0
- */
- @SuppressWarnings("rawtypes")
- private static class ListValues extends Values {
-
- private final List<?> values;
-
- private ListValues(List<?> values) throws IOException {
- this.values = values;
- }
-
- @Override
- public int size() {
- return values.size();
- }
-
- @Override
- public boolean contains(Object o) {
- return values.contains(o);
- }
-
- @Override
- public Iterator iterator() {
- return values.iterator();
- }
-
- @Override
- public Object[] toArray() {
- return values.toArray();
- }
-
- @Override
- public Object[] toArray(Object[] a) {
- return values.toArray(a);
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public Object[] toArray(IntFunction generator) {
- return values.toArray(generator);
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- if (out.getTransportVersion().onOrAfter(VERSION_STORE_VALUES_AS_BYTES_REFERENCE)) {
- BytesReference bytesRef = serialize(values, false);
- out.writeBytesReference(bytesRef);
- } else {
- out.writeGenericValue(values);
- }
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- ListValues that = (ListValues) o;
- return Objects.equals(values, that.values);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(values);
- }
- }
-
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.ZERO;
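
With the pre-7.12 wire-format shim removed, terms are always held in the binary form in memory; a short caller-side sketch (field name and values are illustrative):

```java
// Terms are serialized once into a BytesReference when the builder is constructed.
TermsQueryBuilder query = new TermsQueryBuilder("tags", List.of("alpha", "beta", "gamma"));

// getValues() now returns the (now public) BinaryValues collection directly.
TermsQueryBuilder.BinaryValues values = query.getValues();
assert values.size() == 3;
```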
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java
index ffafc1be6a7ba..0aeb64d5b250f 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java
@@ -8,7 +8,7 @@
package org.elasticsearch.index.reindex;
-import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionRequestLazyBuilder;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.support.ActiveShardCount;
@@ -16,20 +16,44 @@
import org.elasticsearch.client.internal.ElasticsearchClient;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+
+import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE;
+import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT;
public abstract class AbstractBulkByScrollRequestBuilder<
Request extends AbstractBulkByScrollRequest<Request>,
- Self extends AbstractBulkByScrollRequestBuilder<Request, Self>> extends ActionRequestBuilder<Request, BulkByScrollResponse> {
+ Self extends AbstractBulkByScrollRequestBuilder<Request, Self>> extends ActionRequestLazyBuilder<Request, BulkByScrollResponse> {
private final SearchRequestBuilder source;
+ private Integer maxDocs;
+ private Boolean abortOnVersionConflict;
+ private Boolean refresh;
+ private TimeValue timeout;
+ private ActiveShardCount waitForActiveShards;
+ private TimeValue retryBackoffInitialTime;
+ private Integer maxRetries;
+ private Float requestsPerSecond;
+ private Boolean shouldStoreResult;
+ private Integer slices;
protected AbstractBulkByScrollRequestBuilder(
ElasticsearchClient client,
ActionType<BulkByScrollResponse> action,
- SearchRequestBuilder source,
- Request request
+ SearchRequestBuilder source
) {
- super(client, action, request);
+ super(client, action);
this.source = source;
+ initSourceSearchRequest();
+ }
+
+ /*
+ * The following is normally done within the AbstractBulkByScrollRequest constructor. But that constructor is not called until the
+ * request() method is called once this builder is complete. Doing it there blows away changes made to the source request.
+ */
+ private void initSourceSearchRequest() {
+ source.request().scroll(DEFAULT_SCROLL_TIMEOUT);
+ source.request().source(new SearchSourceBuilder());
+ source.request().source().size(DEFAULT_SCROLL_SIZE);
}
protected abstract Self self();
@@ -73,7 +97,7 @@ public Self size(int size) {
* documents.
*/
public Self maxDocs(int maxDocs) {
- request.setMaxDocs(maxDocs);
+ this.maxDocs = maxDocs;
return self();
}
@@ -81,7 +105,7 @@ public Self maxDocs(int maxDocs) {
* Set whether or not version conflicts cause the action to abort.
*/
public Self abortOnVersionConflict(boolean abortOnVersionConflict) {
- request.setAbortOnVersionConflict(abortOnVersionConflict);
+ this.abortOnVersionConflict = abortOnVersionConflict;
return self();
}
@@ -89,7 +113,7 @@ public Self abortOnVersionConflict(boolean abortOnVersionConflict) {
* Call refresh on the indexes we've written to after the request ends?
*/
public Self refresh(boolean refresh) {
- request.setRefresh(refresh);
+ this.refresh = refresh;
return self();
}
@@ -97,7 +121,7 @@ public Self refresh(boolean refresh) {
* Timeout to wait for the shards on to be available for each bulk request.
*/
public Self timeout(TimeValue timeout) {
- request.setTimeout(timeout);
+ this.timeout = timeout;
return self();
}
@@ -106,7 +130,7 @@ public Self timeout(TimeValue timeout) {
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
public Self waitForActiveShards(ActiveShardCount activeShardCount) {
- request.setWaitForActiveShards(activeShardCount);
+ this.waitForActiveShards = activeShardCount;
return self();
}
@@ -115,7 +139,7 @@ public Self waitForActiveShards(ActiveShardCount activeShardCount) {
* is about one minute per bulk request. Once the entire bulk request is successful the retry counter resets.
*/
public Self setRetryBackoffInitialTime(TimeValue retryBackoffInitialTime) {
- request.setRetryBackoffInitialTime(retryBackoffInitialTime);
+ this.retryBackoffInitialTime = retryBackoffInitialTime;
return self();
}
@@ -123,7 +147,7 @@ public Self setRetryBackoffInitialTime(TimeValue retryBackoffInitialTime) {
* Total number of retries attempted for rejections. There is no way to ask for unlimited retries.
*/
public Self setMaxRetries(int maxRetries) {
- request.setMaxRetries(maxRetries);
+ this.maxRetries = maxRetries;
return self();
}
@@ -133,7 +157,7 @@ public Self setMaxRetries(int maxRetries) {
* make sure that it contains any time that we might wait.
*/
public Self setRequestsPerSecond(float requestsPerSecond) {
- request.setRequestsPerSecond(requestsPerSecond);
+ this.requestsPerSecond = requestsPerSecond;
return self();
}
@@ -141,7 +165,7 @@ public Self setRequestsPerSecond(float requestsPerSecond) {
* Should this task store its result after it has finished?
*/
public Self setShouldStoreResult(boolean shouldStoreResult) {
- request.setShouldStoreResult(shouldStoreResult);
+ this.shouldStoreResult = shouldStoreResult;
return self();
}
@@ -149,7 +173,40 @@ public Self setShouldStoreResult(boolean shouldStoreResult) {
* The number of slices this task should be divided into. Defaults to 1 meaning the task isn't sliced into subtasks.
*/
public Self setSlices(int slices) {
- request.setSlices(slices);
+ this.slices = slices;
return self();
}
+
+ protected void apply(Request request) {
+ if (maxDocs != null) {
+ request.setMaxDocs(maxDocs);
+ }
+ if (abortOnVersionConflict != null) {
+ request.setAbortOnVersionConflict(abortOnVersionConflict);
+ }
+ if (refresh != null) {
+ request.setRefresh(refresh);
+ }
+ if (timeout != null) {
+ request.setTimeout(timeout);
+ }
+ if (waitForActiveShards != null) {
+ request.setWaitForActiveShards(waitForActiveShards);
+ }
+ if (retryBackoffInitialTime != null) {
+ request.setRetryBackoffInitialTime(retryBackoffInitialTime);
+ }
+ if (maxRetries != null) {
+ request.setMaxRetries(maxRetries);
+ }
+ if (requestsPerSecond != null) {
+ request.setRequestsPerSecond(requestsPerSecond);
+ }
+ if (shouldStoreResult != null) {
+ request.setShouldStoreResult(shouldStoreResult);
+ }
+ if (slices != null) {
+ request.setSlices(slices);
+ }
+ }
}
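
The builder above no longer mutates a live request; each setter records a value and apply(...) copies only the explicitly set ones onto the request that request() eventually constructs. A minimal, self-contained sketch of that pattern (toy classes, not the Elasticsearch ones):

```java
class ToyRequest {
    int maxRetries = 11;                       // default owned by the request itself
    void setMaxRetries(int n) { maxRetries = n; }
}

class ToyBuilder {
    private Integer maxRetries;                // null means "keep the request default"

    ToyBuilder maxRetries(int n) { this.maxRetries = n; return this; }

    ToyRequest request() {
        ToyRequest request = new ToyRequest(); // constructed lazily, only here
        apply(request);
        return request;
    }

    void apply(ToyRequest request) {
        if (maxRetries != null) {
            request.setMaxRetries(maxRetries); // only override when the caller set a value
        }
    }
}
```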
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java
index 53e878b643517..30114b1472dd5 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java
@@ -16,21 +16,29 @@
public abstract class AbstractBulkIndexByScrollRequestBuilder<
Request extends AbstractBulkIndexByScrollRequest<Request>,
Self extends AbstractBulkIndexByScrollRequestBuilder<Request, Self>> extends AbstractBulkByScrollRequestBuilder<Request, Self> {
+ private Script script;
protected AbstractBulkIndexByScrollRequestBuilder(
ElasticsearchClient client,
ActionType<BulkByScrollResponse> action,
- SearchRequestBuilder search,
- Request request
+ SearchRequestBuilder search
) {
- super(client, action, search, request);
+ super(client, action, search);
}
/**
* Script to modify the documents before they are processed.
*/
public Self script(Script script) {
- request.setScript(script);
+ this.script = script;
return self();
}
+
+ @Override
+ public void apply(Request request) {
+ super.apply(request);
+ if (script != null) {
+ request.setScript(script);
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java
index 85424c2eef7d2..6243859ec0e33 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java
@@ -60,7 +60,7 @@ public DeleteByQueryRequest(StreamInput in) throws IOException {
super(in);
}
- private DeleteByQueryRequest(SearchRequest search, boolean setDefaults) {
+ DeleteByQueryRequest(SearchRequest search, boolean setDefaults) {
super(search, setDefaults);
// Delete-By-Query does not require the source
if (setDefaults) {
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java
index 49d3c660a4b68..3452c6659a392 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java
@@ -8,17 +8,21 @@
package org.elasticsearch.index.reindex;
+import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
public class DeleteByQueryRequestBuilder extends AbstractBulkByScrollRequestBuilder<DeleteByQueryRequest, DeleteByQueryRequestBuilder> {
+ private Boolean abortOnVersionConflict;
+
public DeleteByQueryRequestBuilder(ElasticsearchClient client) {
this(client, new SearchRequestBuilder(client));
}
private DeleteByQueryRequestBuilder(ElasticsearchClient client, SearchRequestBuilder search) {
- super(client, DeleteByQueryAction.INSTANCE, search, new DeleteByQueryRequest(search.request()));
+ super(client, DeleteByQueryAction.INSTANCE, search);
+ source().setFetchSource(false);
}
@Override
@@ -28,7 +32,33 @@ protected DeleteByQueryRequestBuilder self() {
@Override
public DeleteByQueryRequestBuilder abortOnVersionConflict(boolean abortOnVersionConflict) {
- request.setAbortOnVersionConflict(abortOnVersionConflict);
+ this.abortOnVersionConflict = abortOnVersionConflict;
return this;
}
+
+ @Override
+ public DeleteByQueryRequest request() {
+ SearchRequest search = source().request();
+ try {
+ DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(search, false);
+ try {
+ apply(deleteByQueryRequest);
+ return deleteByQueryRequest;
+ } catch (Exception e) {
+ deleteByQueryRequest.decRef();
+ throw e;
+ }
+ } catch (Exception e) {
+ search.decRef();
+ throw e;
+ }
+ }
+
+ @Override
+ public void apply(DeleteByQueryRequest request) {
+ super.apply(request);
+ if (abortOnVersionConflict != null) {
+ request.setAbortOnVersionConflict(abortOnVersionConflict);
+ }
+ }
}
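
From the caller's perspective the lazy construction is transparent; a hedged usage sketch (the client variable, index name, and query are assumptions for illustration):

```java
DeleteByQueryRequest request = new DeleteByQueryRequestBuilder(client)
    .source("my-index")                                    // hypothetical index
    .filter(QueryBuilders.termQuery("user.id", "alice"))   // hypothetical field/value
    .abortOnVersionConflict(false)
    .request();  // the DeleteByQueryRequest is only built and configured at this point
```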
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java
index a1f741d7d51d6..683ec75c57d76 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java
@@ -68,7 +68,7 @@ public ReindexRequest() {
this(search, destination, true);
}
- private ReindexRequest(SearchRequest search, IndexRequest destination, boolean setDefaults) {
+ ReindexRequest(SearchRequest search, IndexRequest destination, boolean setDefaults) {
super(search, setDefaults);
this.destination = destination;
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java
index 88a851bee15e0..156b39d608654 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java
@@ -8,20 +8,23 @@
package org.elasticsearch.index.reindex;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
public class ReindexRequestBuilder extends AbstractBulkIndexByScrollRequestBuilder<ReindexRequest, ReindexRequestBuilder> {
- private final IndexRequestBuilder destination;
+ private final IndexRequestBuilder destinationBuilder;
+ private RemoteInfo remoteInfo;
public ReindexRequestBuilder(ElasticsearchClient client) {
this(client, new SearchRequestBuilder(client), new IndexRequestBuilder(client));
}
private ReindexRequestBuilder(ElasticsearchClient client, SearchRequestBuilder search, IndexRequestBuilder destination) {
- super(client, ReindexAction.INSTANCE, search, new ReindexRequest(search.request(), destination.request()));
- this.destination = destination;
+ super(client, ReindexAction.INSTANCE, search);
+ this.destinationBuilder = destination;
}
@Override
@@ -30,14 +33,14 @@ protected ReindexRequestBuilder self() {
}
public IndexRequestBuilder destination() {
- return destination;
+ return destinationBuilder;
}
/**
* Set the destination index.
*/
public ReindexRequestBuilder destination(String index) {
- destination.setIndex(index);
+ destinationBuilder.setIndex(index);
return this;
}
@@ -45,7 +48,34 @@ public ReindexRequestBuilder destination(String index) {
* Setup reindexing from a remote cluster.
*/
public ReindexRequestBuilder setRemoteInfo(RemoteInfo remoteInfo) {
- request().setRemoteInfo(remoteInfo);
+ this.remoteInfo = remoteInfo;
return this;
}
+
+ @Override
+ public ReindexRequest request() {
+ SearchRequest source = source().request();
+ try {
+ IndexRequest destination = destinationBuilder.request();
+ try {
+ ReindexRequest reindexRequest = new ReindexRequest(source, destination, false);
+ try {
+ super.apply(reindexRequest);
+ if (remoteInfo != null) {
+ reindexRequest.setRemoteInfo(remoteInfo);
+ }
+ return reindexRequest;
+ } catch (Exception e) {
+ reindexRequest.decRef();
+ throw e;
+ }
+ } catch (Exception e) {
+ destination.decRef();
+ throw e;
+ }
+ } catch (Exception e) {
+ source.decRef();
+ throw e;
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java
index d30b54fdafd42..44b959074ed76 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java
@@ -52,7 +52,7 @@ public UpdateByQueryRequest(StreamInput in) throws IOException {
pipeline = in.readOptionalString();
}
- private UpdateByQueryRequest(SearchRequest search, boolean setDefaults) {
+ UpdateByQueryRequest(SearchRequest search, boolean setDefaults) {
super(search, setDefaults);
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java
index b63ebdf1def86..270014d6ab3f2 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java
@@ -8,6 +8,7 @@
package org.elasticsearch.index.reindex;
+import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
@@ -15,12 +16,15 @@ public class UpdateByQueryRequestBuilder extends AbstractBulkIndexByScrollReques
UpdateByQueryRequest,
UpdateByQueryRequestBuilder> {
+ private Boolean abortOnVersionConflict;
+ private String pipeline;
+
public UpdateByQueryRequestBuilder(ElasticsearchClient client) {
this(client, new SearchRequestBuilder(client));
}
private UpdateByQueryRequestBuilder(ElasticsearchClient client, SearchRequestBuilder search) {
- super(client, UpdateByQueryAction.INSTANCE, search, new UpdateByQueryRequest(search.request()));
+ super(client, UpdateByQueryAction.INSTANCE, search);
}
@Override
@@ -30,12 +34,41 @@ protected UpdateByQueryRequestBuilder self() {
@Override
public UpdateByQueryRequestBuilder abortOnVersionConflict(boolean abortOnVersionConflict) {
- request.setAbortOnVersionConflict(abortOnVersionConflict);
+ this.abortOnVersionConflict = abortOnVersionConflict;
return this;
}
public UpdateByQueryRequestBuilder setPipeline(String pipeline) {
- request.setPipeline(pipeline);
+ this.pipeline = pipeline;
return this;
}
+
+ @Override
+ public UpdateByQueryRequest request() {
+ SearchRequest search = source().request();
+ try {
+ UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(search, false);
+ try {
+ apply(updateByQueryRequest);
+ return updateByQueryRequest;
+ } catch (Exception e) {
+ updateByQueryRequest.decRef();
+ throw e;
+ }
+ } catch (Exception e) {
+ search.decRef();
+ throw e;
+ }
+ }
+
+ @Override
+ public void apply(UpdateByQueryRequest request) {
+ super.apply(request);
+ if (abortOnVersionConflict != null) {
+ request.setAbortOnVersionConflict(abortOnVersionConflict);
+ }
+ if (pipeline != null) {
+ request.setPipeline(pipeline);
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java
index 26c8eac53b0fb..605c799a2ba99 100644
--- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java
+++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java
@@ -29,18 +29,24 @@ default void init(Client client) {}
* {@code service_settings} field.
* This function modifies {@code config map}, fields are removed
* from the map as they are read.
- *
+ *
* If the map contains unrecognized configuration option an
* {@code ElasticsearchStatusException} is thrown.
*
- * @param modelId Model Id
- * @param taskType The model task type
- * @param config Configuration options including the secrets
+ * @param modelId Model Id
+ * @param taskType The model task type
+ * @param config Configuration options including the secrets
* @param platfromArchitectures The Set of platform architectures (OS name and hardware architecture)
- * the cluster nodes and models are running on.
- * @return The parsed {@link Model}
+ * the cluster nodes and models are running on.
+ * @param parsedModelListener A listener which will handle the resulting model or failure
*/
- Model parseRequestConfig(String modelId, TaskType taskType, Map<String, Object> config, Set<String> platfromArchitectures);
+ void parseRequestConfig(
+ String modelId,
+ TaskType taskType,
+ Map<String, Object> config,
+ Set<String> platfromArchitectures,
+ ActionListener<Model> parsedModelListener
+ );
/**
* Parse model configuration from {@code config map} from persisted storage and return the parsed {@link Model}. This requires that
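
parseRequestConfig is now asynchronous and reports through a listener instead of returning a Model. A hedged caller-side sketch (the service variable and the storeModel/delegate handling are illustrative, not part of this change):

```java
service.parseRequestConfig(
    modelId,
    taskType,
    config,
    platformArchitectures,
    ActionListener.wrap(
        model -> storeModel(model),   // hypothetical follow-up once parsing succeeds
        e -> delegate.onFailure(e)    // propagate parse failures to the original caller
    )
);
```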
diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java
index 3a2a810dc61b5..1f82ebd786e98 100644
--- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java
+++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java
@@ -41,6 +41,7 @@
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.MasterServiceTaskQueue;
import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.TriConsumer;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
@@ -668,10 +669,41 @@ void validatePipeline(Map ingestInfos, String pipelin
ExceptionsHelper.rethrowAndSuppress(exceptions);
}
+ private record IngestPipelinesExecutionResult(boolean success, boolean shouldKeep, Exception exception, String failedIndex) {
+
+ private static final IngestPipelinesExecutionResult SUCCESSFUL_RESULT = new IngestPipelinesExecutionResult(true, true, null, null);
+ private static final IngestPipelinesExecutionResult DISCARD_RESULT = new IngestPipelinesExecutionResult(true, false, null, null);
+ private static IngestPipelinesExecutionResult failAndStoreFor(String index, Exception e) {
+ return new IngestPipelinesExecutionResult(false, true, e, index);
+ }
+ }
+
+ /**
+ * Executes all applicable pipelines for a collection of documents.
+ * @param numberOfActionRequests The total number of requests to process.
+ * @param actionRequests The collection of requests to be processed.
+ * @param onDropped A callback executed when a document is dropped by a pipeline.
+ * Accepts the slot in the collection of requests that the document occupies.
+ * @param shouldStoreFailure A predicate executed on each ingest failure to determine if the
+ * failure should be stored somewhere.
+ * @param onStoreFailure A callback executed when a document fails ingest but the failure should
+ * be persisted elsewhere. Accepts the slot in the collection of requests
+ * that the document occupies, the index name that the request was targeting
+ * at the time of failure, and the exception that the document encountered.
+ * @param onFailure A callback executed when a document fails ingestion and does not need to be
+ * persisted. Accepts the slot in the collection of requests that the document
+ * occupies, and the exception that the document encountered.
+ * @param onCompletion A callback executed once all documents have been processed. Accepts the thread
+ * that ingestion completed on or an exception in the event that the entire operation
+ * has failed.
+ * @param executorName Which executor the bulk request should be executed on.
+ */
public void executeBulkRequest(
final int numberOfActionRequests,
final Iterable<DocWriteRequest<?>> actionRequests,
final IntConsumer onDropped,
+ final Predicate<String> shouldStoreFailure,
+ final TriConsumer<Integer, String, Exception> onStoreFailure,
final BiConsumer<Integer, Exception> onFailure,
final BiConsumer<Thread, Exception> onCompletion,
final String executorName
@@ -708,34 +740,45 @@ protected void doRun() {
totalMetrics.preIngest();
final int slot = i;
final Releasable ref = refs.acquire();
+ DocumentParsingObserver documentParsingObserver = documentParsingObserverSupplier.get();
+ final IngestDocument ingestDocument = newIngestDocument(indexRequest, documentParsingObserver);
+ final org.elasticsearch.script.Metadata originalDocumentMetadata = ingestDocument.getMetadata().clone();
// the document listener gives us three-way logic: a document can fail processing (1), or it can
// be successfully processed. a successfully processed document can be kept (2) or dropped (3).
- final ActionListener<Boolean> documentListener = ActionListener.runAfter(new ActionListener<>() {
- @Override
- public void onResponse(Boolean kept) {
- assert kept != null;
- if (kept == false) {
- onDropped.accept(slot);
+ final ActionListener<IngestPipelinesExecutionResult> documentListener = ActionListener.runAfter(
+ new ActionListener<>() {
+ @Override
+ public void onResponse(IngestPipelinesExecutionResult result) {
+ assert result != null;
+ if (result.success) {
+ if (result.shouldKeep == false) {
+ onDropped.accept(slot);
+ }
+ } else {
+ // We were given a failure result in the onResponse method, so we must store the failure
+ // Recover the original document state, track a failed ingest, and pass it along
+ updateIndexRequestMetadata(indexRequest, originalDocumentMetadata);
+ totalMetrics.ingestFailed();
+ onStoreFailure.apply(slot, result.failedIndex, result.exception);
+ }
}
- }
- @Override
- public void onFailure(Exception e) {
- totalMetrics.ingestFailed();
- onFailure.accept(slot, e);
+ @Override
+ public void onFailure(Exception e) {
+ totalMetrics.ingestFailed();
+ onFailure.accept(slot, e);
+ }
+ },
+ () -> {
+ // regardless of success or failure, we always stop the ingest "stopwatch" and release the ref to indicate
+ // that we're finished with this document
+ final long ingestTimeInNanos = System.nanoTime() - startTimeInNanos;
+ totalMetrics.postIngest(ingestTimeInNanos);
+ ref.close();
}
- }, () -> {
- // regardless of success or failure, we always stop the ingest "stopwatch" and release the ref to indicate
- // that we're finished with this document
- final long ingestTimeInNanos = System.nanoTime() - startTimeInNanos;
- totalMetrics.postIngest(ingestTimeInNanos);
- ref.close();
- });
- DocumentParsingObserver documentParsingObserver = documentParsingObserverSupplier.get();
-
- IngestDocument ingestDocument = newIngestDocument(indexRequest, documentParsingObserver);
+ );
- executePipelines(pipelines, indexRequest, ingestDocument, documentListener);
+ executePipelines(pipelines, indexRequest, ingestDocument, shouldStoreFailure, documentListener);
indexRequest.setPipelinesHaveRun();
assert actionRequest.index() != null;
@@ -825,7 +868,8 @@ private void executePipelines(
final PipelineIterator pipelines,
final IndexRequest indexRequest,
final IngestDocument ingestDocument,
- final ActionListener<Boolean> listener
+ final Predicate<String> shouldStoreFailure,
+ final ActionListener<IngestPipelinesExecutionResult> listener
) {
assert pipelines.hasNext();
PipelineSlot slot = pipelines.next();
@@ -835,13 +879,20 @@ private void executePipelines(
// reset the reroute flag, at the start of a new pipeline execution this document hasn't been rerouted yet
ingestDocument.resetReroute();
+ final String originalIndex = indexRequest.indices()[0];
+ final Consumer<Exception> exceptionHandler = (Exception e) -> {
+ if (shouldStoreFailure.test(originalIndex)) {
+ listener.onResponse(IngestPipelinesExecutionResult.failAndStoreFor(originalIndex, e));
+ } else {
+ listener.onFailure(e);
+ }
+ };
try {
if (pipeline == null) {
throw new IllegalArgumentException("pipeline with id [" + pipelineId + "] does not exist");
}
indexRequest.addPipeline(pipelineId);
- final String originalIndex = indexRequest.indices()[0];
executePipeline(ingestDocument, pipeline, (keep, e) -> {
assert keep != null;
@@ -855,12 +906,12 @@ private void executePipelines(
),
e
);
- listener.onFailure(e);
+ exceptionHandler.accept(e);
return; // document failed!
}
if (keep == false) {
- listener.onResponse(false);
+ listener.onResponse(IngestPipelinesExecutionResult.DISCARD_RESULT);
return; // document dropped!
}
@@ -875,7 +926,7 @@ private void executePipelines(
} catch (IllegalArgumentException ex) {
// An IllegalArgumentException can be thrown when an ingest processor creates a source map that is self-referencing.
// In that case, we catch and wrap the exception, so we can include more details
- listener.onFailure(
+ exceptionHandler.accept(
new IllegalArgumentException(
format(
"Failed to generate the source document for ingest pipeline [%s] for document [%s/%s]",
@@ -895,7 +946,7 @@ private void executePipelines(
if (Objects.equals(originalIndex, newIndex) == false) {
// final pipelines cannot change the target index (either directly or by way of a reroute)
if (isFinalPipeline) {
- listener.onFailure(
+ exceptionHandler.accept(
new IllegalStateException(
format(
"final pipeline [%s] can't change the target index (from [%s] to [%s]) for document [%s]",
@@ -914,7 +965,7 @@ private void executePipelines(
if (cycle) {
List<String> indexCycle = new ArrayList<>(ingestDocument.getIndexHistory());
indexCycle.add(newIndex);
- listener.onFailure(
+ exceptionHandler.accept(
new IllegalStateException(
format(
"index cycle detected while processing pipeline [%s] for document [%s]: %s",
@@ -941,12 +992,12 @@ private void executePipelines(
}
if (newPipelines.hasNext()) {
- executePipelines(newPipelines, indexRequest, ingestDocument, listener);
+ executePipelines(newPipelines, indexRequest, ingestDocument, shouldStoreFailure, listener);
} else {
// update the index request's source and (potentially) cache the timestamp for TSDB
updateIndexRequestSource(indexRequest, ingestDocument);
cacheRawTimestamp(indexRequest, ingestDocument);
- listener.onResponse(true); // document succeeded!
+ listener.onResponse(IngestPipelinesExecutionResult.SUCCESSFUL_RESULT); // document succeeded!
}
});
} catch (Exception e) {
@@ -954,7 +1005,7 @@ private void executePipelines(
() -> format("failed to execute pipeline [%s] for document [%s/%s]", pipelineId, indexRequest.index(), indexRequest.id()),
e
);
- listener.onFailure(e); // document failed!
+ exceptionHandler.accept(e); // document failed
}
}
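
The new shouldStoreFailure / onStoreFailure pair lets callers route ingest failures to a failure store instead of failing the bulk item outright. A hedged sketch of how a caller might wire up the expanded signature (helper names are invented for illustration):

```java
ingestService.executeBulkRequest(
    bulkRequest.numberOfActions(),
    bulkRequest.requests(),
    slot -> markDropped(slot),                                // onDropped
    indexName -> failureStoreEnabledFor(indexName),           // shouldStoreFailure (hypothetical helper)
    (slot, targetIndex, e) -> redirectToFailureStore(slot, targetIndex, e), // onStoreFailure
    (slot, e) -> markFailed(slot, e),                         // onFailure
    (thread, e) -> completeBulkOperation(thread, e),          // onCompletion
    ThreadPool.Names.WRITE                                    // executorName
);
```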
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/BinaryShapeDocValuesField.java b/server/src/main/java/org/elasticsearch/lucene/spatial/BinaryShapeDocValuesField.java
similarity index 79%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/BinaryShapeDocValuesField.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/BinaryShapeDocValuesField.java
index eb8567f91cafb..70ca08482c15a 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/BinaryShapeDocValuesField.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/BinaryShapeDocValuesField.java
@@ -1,20 +1,18 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.mapper;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.geometry.Geometry;
import org.elasticsearch.index.mapper.CustomDocValuesField;
-import org.elasticsearch.xpack.spatial.index.fielddata.CentroidCalculator;
-import org.elasticsearch.xpack.spatial.index.fielddata.CoordinateEncoder;
-import org.elasticsearch.xpack.spatial.index.fielddata.GeometryDocValueWriter;
import java.io.IOException;
import java.util.ArrayList;
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeCoordinateEncoder.java b/server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeCoordinateEncoder.java
similarity index 85%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeCoordinateEncoder.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeCoordinateEncoder.java
index f8a7ebe8b150d..aa043f8c401be 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeCoordinateEncoder.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeCoordinateEncoder.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.geo.XYEncodingUtils;
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeDocValuesQuery.java b/server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQuery.java
similarity index 79%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeDocValuesQuery.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQuery.java
index a3c50d5cb6162..5d377ea97e21b 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeDocValuesQuery.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQuery.java
@@ -1,16 +1,16 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.mapper;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.document.ShapeField;
import org.apache.lucene.geo.Component2D;
import org.apache.lucene.geo.XYGeometry;
-import org.elasticsearch.xpack.spatial.index.fielddata.CoordinateEncoder;
/** Lucene geometry query for {@link BinaryShapeDocValuesField}. */
public class CartesianShapeDocValuesQuery extends ShapeDocValuesQuery {
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeIndexer.java b/server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeIndexer.java
similarity index 92%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeIndexer.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeIndexer.java
index c23d63baa5791..ca18d2a854dbb 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeIndexer.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeIndexer.java
@@ -1,10 +1,11 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.mapper;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.document.XYShape;
import org.apache.lucene.index.IndexableField;
@@ -47,7 +48,7 @@ public List indexShape(Geometry shape) {
return visitor.fields;
}
- private class LuceneGeometryVisitor implements GeometryVisitor {
+ private static class LuceneGeometryVisitor implements GeometryVisitor {
private List<IndexableField> fields = new ArrayList<>();
private String name;
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CentroidCalculator.java b/server/src/main/java/org/elasticsearch/lucene/spatial/CentroidCalculator.java
similarity index 97%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CentroidCalculator.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/CentroidCalculator.java
index c00cfdba4d3c1..b63f650a539d8 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CentroidCalculator.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/CentroidCalculator.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.elasticsearch.geometry.Circle;
import org.elasticsearch.geometry.Geometry;
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Component2DVisitor.java b/server/src/main/java/org/elasticsearch/lucene/spatial/Component2DVisitor.java
similarity index 95%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Component2DVisitor.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/Component2DVisitor.java
index b5804d18a4e6e..ea79f77c1a075 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Component2DVisitor.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/Component2DVisitor.java
@@ -1,20 +1,21 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.document.ShapeField;
import org.apache.lucene.geo.Component2D;
import org.apache.lucene.index.PointValues;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeVisitor.TriangleTreeDecodedVisitor;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeVisitor.abFromTriangle;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeVisitor.bcFromTriangle;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeVisitor.caFromTriangle;
+import static org.elasticsearch.lucene.spatial.TriangleTreeVisitor.TriangleTreeDecodedVisitor;
+import static org.elasticsearch.lucene.spatial.TriangleTreeVisitor.abFromTriangle;
+import static org.elasticsearch.lucene.spatial.TriangleTreeVisitor.bcFromTriangle;
+import static org.elasticsearch.lucene.spatial.TriangleTreeVisitor.caFromTriangle;
/**
* A {@link TriangleTreeDecodedVisitor} implementation for {@link Component2D} geometries.
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CoordinateEncoder.java b/server/src/main/java/org/elasticsearch/lucene/spatial/CoordinateEncoder.java
similarity index 78%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CoordinateEncoder.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/CoordinateEncoder.java
index 1458282fc335d..e10687246277b 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CoordinateEncoder.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/CoordinateEncoder.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
/**
* Interface for classes that help encode double-valued spatial coordinates x/y to
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/DimensionalShapeType.java b/server/src/main/java/org/elasticsearch/lucene/spatial/DimensionalShapeType.java
similarity index 82%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/DimensionalShapeType.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/DimensionalShapeType.java
index 4cf8895893738..09be37653b14f 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/DimensionalShapeType.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/DimensionalShapeType.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.elasticsearch.common.io.stream.ByteArrayStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Extent.java b/server/src/main/java/org/elasticsearch/lucene/spatial/Extent.java
similarity index 96%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Extent.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/Extent.java
index 1c8be0e3b806a..a5d7a81410089 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Extent.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/Extent.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -16,7 +17,7 @@
/**
* Object representing the extent of a geometry object within a {@link TriangleTreeWriter}.
*/
-class Extent {
+public class Extent {
public int top;
public int bottom;
@@ -206,7 +207,7 @@ public static Extent fromPoint(int x, int y) {
* @param topRightY the top-right y-coordinate
* @return the extent of the two points
*/
- static Extent fromPoints(int bottomLeftX, int bottomLeftY, int topRightX, int topRightY) {
+ public static Extent fromPoints(int bottomLeftX, int bottomLeftY, int topRightX, int topRightY) {
int negLeft = Integer.MAX_VALUE;
int negRight = Integer.MIN_VALUE;
int posLeft = Integer.MAX_VALUE;
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeCoordinateEncoder.java b/server/src/main/java/org/elasticsearch/lucene/spatial/GeoShapeCoordinateEncoder.java
similarity index 85%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeCoordinateEncoder.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/GeoShapeCoordinateEncoder.java
index ead1b44abe51e..29067d41ac9d3 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeCoordinateEncoder.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/GeoShapeCoordinateEncoder.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.geo.GeoEncodingUtils;
import org.elasticsearch.common.geo.GeoUtils;
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueReader.java b/server/src/main/java/org/elasticsearch/lucene/spatial/GeometryDocValueReader.java
similarity index 87%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueReader.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/GeometryDocValueReader.java
index 16b655a1ad034..25c9a580f8dc1 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueReader.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/GeometryDocValueReader.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.ByteArrayStreamInput;
@@ -60,7 +61,7 @@ public void reset(BytesRef bytesRef) throws IOException {
/**
* returns the {@link Extent} of this geometry.
*/
- protected Extent getExtent() throws IOException {
+ public Extent getExtent() throws IOException {
if (treeOffset == 0) {
getSumCentroidWeight(); // skip CENTROID_HEADER + var-long sum-weight
Extent.readFromCompressed(input, extent);
@@ -74,7 +75,7 @@ protected Extent getExtent() throws IOException {
/**
* returns the encoded X coordinate of the centroid.
*/
- protected int getCentroidX() throws IOException {
+ public int getCentroidX() throws IOException {
input.setPosition(docValueOffset + 0);
return input.readInt();
}
@@ -82,17 +83,17 @@ protected int getCentroidX() throws IOException {
/**
* returns the encoded Y coordinate of the centroid.
*/
- protected int getCentroidY() throws IOException {
+ public int getCentroidY() throws IOException {
input.setPosition(docValueOffset + 4);
return input.readInt();
}
- protected DimensionalShapeType getDimensionalShapeType() {
+ public DimensionalShapeType getDimensionalShapeType() {
input.setPosition(docValueOffset + 8);
return DimensionalShapeType.readFrom(input);
}
- protected double getSumCentroidWeight() throws IOException {
+ public double getSumCentroidWeight() throws IOException {
input.setPosition(docValueOffset + 9);
return Double.longBitsToDouble(input.readVLong());
}
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueWriter.java b/server/src/main/java/org/elasticsearch/lucene/spatial/GeometryDocValueWriter.java
similarity index 85%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueWriter.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/GeometryDocValueWriter.java
index d168ca9563b57..135bdb931bb84 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueWriter.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/GeometryDocValueWriter.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.BytesRef;
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/LatLonShapeDocValuesQuery.java b/server/src/main/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQuery.java
similarity index 73%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/LatLonShapeDocValuesQuery.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQuery.java
index 6926148f50314..b1d682e181e21 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/LatLonShapeDocValuesQuery.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQuery.java
@@ -1,24 +1,24 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.mapper;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.document.ShapeField;
import org.apache.lucene.geo.Component2D;
import org.apache.lucene.geo.LatLonGeometry;
import org.apache.lucene.geo.Rectangle;
-import org.elasticsearch.xpack.spatial.index.fielddata.CoordinateEncoder;
import java.util.List;
/** Lucene geometry query for {@link BinaryShapeDocValuesField}. */
-class LatLonShapeDocValuesQuery extends ShapeDocValuesQuery {
+public class LatLonShapeDocValuesQuery extends ShapeDocValuesQuery {
- LatLonShapeDocValuesQuery(String field, ShapeField.QueryRelation relation, LatLonGeometry... geometries) {
+ public LatLonShapeDocValuesQuery(String field, ShapeField.QueryRelation relation, LatLonGeometry... geometries) {
super(field, CoordinateEncoder.GEO, relation, geometries);
}
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeDocValuesQuery.java b/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java
similarity index 96%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeDocValuesQuery.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java
index 968ee86f3429e..6804901d9511e 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeDocValuesQuery.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.mapper;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.document.ShapeField;
import org.apache.lucene.geo.Component2D;
@@ -22,9 +23,6 @@
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
-import org.elasticsearch.xpack.spatial.index.fielddata.Component2DVisitor;
-import org.elasticsearch.xpack.spatial.index.fielddata.CoordinateEncoder;
-import org.elasticsearch.xpack.spatial.index.fielddata.GeometryDocValueReader;
import java.io.IOException;
import java.util.ArrayList;
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeReader.java b/server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeReader.java
similarity index 90%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeReader.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeReader.java
index 941bffc7442df..5ed80d61a39d2 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeReader.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeReader.java
@@ -1,20 +1,21 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.elasticsearch.common.io.stream.ByteArrayStreamInput;
import java.io.IOException;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeWriter.LEFT;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeWriter.LINE;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeWriter.POINT;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeWriter.RIGHT;
+import static org.elasticsearch.lucene.spatial.TriangleTreeWriter.LEFT;
+import static org.elasticsearch.lucene.spatial.TriangleTreeWriter.LINE;
+import static org.elasticsearch.lucene.spatial.TriangleTreeWriter.POINT;
+import static org.elasticsearch.lucene.spatial.TriangleTreeWriter.RIGHT;
/**
* A tree reader for a previous serialized {@link org.elasticsearch.geometry.Geometry} using
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeVisitor.java b/server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeVisitor.java
similarity index 91%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeVisitor.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeVisitor.java
index 5afb2862cfeea..3156203125f24 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeVisitor.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeVisitor.java
@@ -1,15 +1,16 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeWriter.AB_FROM_TRIANGLE;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeWriter.BC_FROM_TRIANGLE;
-import static org.elasticsearch.xpack.spatial.index.fielddata.TriangleTreeWriter.CA_FROM_TRIANGLE;
+import static org.elasticsearch.lucene.spatial.TriangleTreeWriter.AB_FROM_TRIANGLE;
+import static org.elasticsearch.lucene.spatial.TriangleTreeWriter.BC_FROM_TRIANGLE;
+import static org.elasticsearch.lucene.spatial.TriangleTreeWriter.CA_FROM_TRIANGLE;
/** Visitor for triangle interval tree.
*
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeWriter.java b/server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeWriter.java
similarity index 97%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeWriter.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeWriter.java
index a69f0f6d73365..88b2de5f6e8e1 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeWriter.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeWriter.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.spatial.index.fielddata;
+package org.elasticsearch.lucene.spatial;
import org.apache.lucene.document.ShapeField;
import org.apache.lucene.index.IndexableField;
@@ -22,7 +23,7 @@
* This is a tree-writer that serializes a list of {@link ShapeField.DecodedTriangle} as an interval tree
* into a byte array.
*/
-class TriangleTreeWriter {
+public class TriangleTreeWriter {
static final byte LEFT = 1;
static final byte RIGHT = 1 << 1;
diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
index 24c8b87bcff50..02c59a6f015ed 100644
--- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
+++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
@@ -98,6 +98,9 @@
import org.elasticsearch.health.node.LocalHealthMonitor;
import org.elasticsearch.health.node.ShardsCapacityHealthIndicatorService;
import org.elasticsearch.health.node.selection.HealthNodeTaskExecutor;
+import org.elasticsearch.health.node.tracker.DiskHealthTracker;
+import org.elasticsearch.health.node.tracker.HealthTracker;
+import org.elasticsearch.health.node.tracker.RepositoriesHealthTracker;
import org.elasticsearch.health.stats.HealthApiStats;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.index.IndexSettingProvider;
@@ -1018,7 +1021,8 @@ record PluginServiceInstances(
transportService,
featureService,
threadPool,
- telemetryProvider
+ telemetryProvider,
+ repositoryService
)
);
@@ -1171,7 +1175,8 @@ private Module loadDiagnosticServices(
TransportService transportService,
FeatureService featureService,
ThreadPool threadPool,
- TelemetryProvider telemetryProvider
+ TelemetryProvider telemetryProvider,
+ RepositoriesService repositoriesService
) {
MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService);
@@ -1203,13 +1208,18 @@ private Module loadDiagnosticServices(
telemetryProvider
);
HealthMetadataService healthMetadataService = HealthMetadataService.create(clusterService, featureService, settings);
+
+ List<HealthTracker<?>> healthTrackers = List.of(
+ new DiskHealthTracker(nodeService, clusterService),
+ new RepositoriesHealthTracker(repositoriesService)
+ );
LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create(
settings,
clusterService,
- nodeService,
threadPool,
client,
- featureService
+ featureService,
+ healthTrackers
);
HealthInfoCache nodeHealthOverview = HealthInfoCache.create(clusterService);
diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java
index dde044bf15115..5bee2d4a557b2 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchHit.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java
@@ -55,6 +55,7 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -578,25 +579,24 @@ public void resolveLookupFields(Map<LookupField, List<Object>> lookupResults) {
if (lookupResults.isEmpty()) {
return;
}
- final List<String> fields = new ArrayList<>(documentFields.keySet());
- for (String field : fields) {
- documentFields.computeIfPresent(field, (k, docField) -> {
- if (docField.getLookupFields().isEmpty()) {
- return docField;
- }
- final List<Object> newValues = new ArrayList<>(docField.getValues());
- for (LookupField lookupField : docField.getLookupFields()) {
- final List<Object> resolvedValues = lookupResults.get(lookupField);
- if (resolvedValues != null) {
- newValues.addAll(resolvedValues);
- }
- }
- if (newValues.isEmpty() && docField.getIgnoredValues().isEmpty()) {
- return null;
- } else {
- return new DocumentField(docField.getName(), newValues, docField.getIgnoredValues());
+ for (Iterator<Map.Entry<String, DocumentField>> iterator = documentFields.entrySet().iterator(); iterator.hasNext();) {
+ Map.Entry<String, DocumentField> entry = iterator.next();
+ final DocumentField docField = entry.getValue();
+ if (docField.getLookupFields().isEmpty()) {
+ continue;
+ }
+ final List<Object> newValues = new ArrayList<>(docField.getValues());
+ for (LookupField lookupField : docField.getLookupFields()) {
+ final List<Object> resolvedValues = lookupResults.get(lookupField);
+ if (resolvedValues != null) {
+ newValues.addAll(resolvedValues);
}
- });
+ }
+ if (newValues.isEmpty() && docField.getIgnoredValues().isEmpty()) {
+ iterator.remove();
+ } else {
+ entry.setValue(new DocumentField(docField.getName(), newValues, docField.getIgnoredValues()));
+ }
}
assert hasLookupFields() == false : "Some lookup fields are not resolved";
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java
index dda632e7aa020..8f6987dfa6be1 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java
@@ -15,6 +15,7 @@
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree;
import java.io.IOException;
+import java.util.AbstractList;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@@ -77,6 +78,25 @@ protected InternalMultiBucketAggregation(StreamInput in) throws IOException {
*/
protected abstract B reduceBucket(List<B> buckets, AggregationReduceContext context);
+ /** Helps to lazily construct the aggregation list for reduction */
+ protected static class BucketAggregationList<B extends MultiBucketsAggregation.Bucket> extends AbstractList<InternalAggregations> {
+ private final List<B> buckets;
+
+ public BucketAggregationList(List<B> buckets) {
+ this.buckets = buckets;
+ }
+
+ @Override
+ public InternalAggregations get(int index) {
+ return buckets.get(index).getAggregations();
+ }
+
+ @Override
+ public int size() {
+ return buckets.size();
+ }
+ }
+
@Override
public abstract List getBuckets();
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java
index 922baf1f83f83..e9dc079edaf14 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java
@@ -283,19 +283,18 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
@Override
protected InternalBucket reduceBucket(List<InternalBucket> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
- List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());
+ assert buckets.isEmpty() == false;
long docCount = 0;
for (InternalBucket bucket : buckets) {
docCount += bucket.docCount;
- aggregations.add(bucket.aggregations);
}
- InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
/* Use the formats from the bucket because they'll be right to format
* the key. The formats on the InternalComposite doing the reducing are
* just whatever formats make sense for *its* index. This can be real
* trouble when the index doing the reducing is unmapped. */
- var reducedFormats = buckets.get(0).formats;
+ final var reducedFormats = buckets.get(0).formats;
return new InternalBucket(sourceNames, reducedFormats, buckets.get(0).key, reverseMuls, missingOrders, docCount, aggs);
}
diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregationBuilder.java
similarity index 93%
rename from x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java
rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregationBuilder.java
index 31be7f149831d..4f71c964ebaf9 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregationBuilder.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.countedkeyword;
+package org.elasticsearch.search.aggregations.bucket.countedterms;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
@@ -56,7 +57,7 @@ public CountedTermsAggregationBuilder(String name) {
super(name);
}
- protected CountedTermsAggregationBuilder(
+ public CountedTermsAggregationBuilder(
ValuesSourceAggregationBuilder clone,
AggregatorFactories.Builder factoriesBuilder,
Map<String, Object> metadata
@@ -64,7 +65,7 @@ protected CountedTermsAggregationBuilder(
super(clone, factoriesBuilder, metadata);
}
- protected CountedTermsAggregationBuilder(StreamInput in) throws IOException {
+ public CountedTermsAggregationBuilder(StreamInput in) throws IOException {
super(in);
bucketCountThresholds = new TermsAggregator.BucketCountThresholds(in);
}
diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
similarity index 96%
rename from x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregator.java
rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
index 5e1b1e3624f00..588c53a2d1463 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.countedkeyword;
+package org.elasticsearch.search.aggregations.bucket.countedterms;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;
diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorFactory.java
similarity index 95%
rename from x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorFactory.java
rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorFactory.java
index 3b8be76f14da8..430e28e96d5ee 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorFactory.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.countedkeyword;
+package org.elasticsearch.search.aggregations.bucket.countedterms;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.rest.RestStatus;
diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorSupplier.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorSupplier.java
similarity index 81%
rename from x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorSupplier.java
rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorSupplier.java
index 2817863f6b42c..979c99018e969 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorSupplier.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorSupplier.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.countedkeyword;
+package org.elasticsearch.search.aggregations.bucket.countedterms;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java
index 726589ca7c1b5..8ae5aed72a3a5 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java
@@ -238,18 +238,17 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
@Override
protected InternalBucket reduceBucket(List<InternalBucket> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
+ assert buckets.isEmpty() == false;
InternalBucket reduced = null;
- List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
for (InternalBucket bucket : buckets) {
if (reduced == null) {
reduced = new InternalBucket(bucket.key, bucket.docCount, bucket.aggregations, bucket.keyed, keyedBucket);
} else {
reduced.docCount += bucket.docCount;
}
- aggregationsList.add(bucket.aggregations);
}
- reduced.aggregations = InternalAggregations.reduce(aggregationsList, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ reduced.aggregations = InternalAggregations.reduce(aggregations, context);
return reduced;
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java
index 315eda4793a12..bc12555664575 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java
@@ -136,14 +136,13 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
@Override
protected InternalGeoGridBucket reduceBucket(List<InternalGeoGridBucket> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
- List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
+ assert buckets.isEmpty() == false;
long docCount = 0;
for (InternalGeoGridBucket bucket : buckets) {
docCount += bucket.docCount;
- aggregationsList.add(bucket.aggregations);
}
- final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return createBucket(buckets.get(0).hashAsLong, docCount, aggs);
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
index 8a7561aaab574..a6d3627ecda28 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
@@ -393,14 +393,13 @@ protected boolean lessThan(IteratorAndCurrent<Bucket> a, IteratorAndCurrent<Buck
@Override
protected Bucket reduceBucket(List<Bucket> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
- List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());
+ assert buckets.isEmpty() == false;
long docCount = 0;
for (Bucket bucket : buckets) {
docCount += bucket.docCount;
- aggregations.add(bucket.getAggregations());
}
- InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return createBucket(buckets.get(0).key, docCount, aggs);
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
index b6d5a705fe0cd..88777d5abde99 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
@@ -348,14 +348,13 @@ protected boolean lessThan(IteratorAndCurrent<Bucket> a, IteratorAndCurrent<Buck
@Override
protected Bucket reduceBucket(List<Bucket> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
- List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());
+ assert buckets.isEmpty() == false;
long docCount = 0;
for (Bucket bucket : buckets) {
docCount += bucket.docCount;
- aggregations.add(bucket.getAggregations());
}
- InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return createBucket(buckets.get(0).key, docCount, aggs);
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java
index 59bb251368c2e..073621575f292 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java
@@ -307,7 +307,6 @@ public Number getKey(MultiBucketsAggregation.Bucket bucket) {
@Override
protected Bucket reduceBucket(List<Bucket> buckets, AggregationReduceContext context) {
- List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());
long docCount = 0;
double min = Double.POSITIVE_INFINITY;
double max = Double.NEGATIVE_INFINITY;
@@ -317,11 +316,11 @@ protected Bucket reduceBucket(List<Bucket> buckets, AggregationReduceContext con
min = Math.min(min, bucket.bounds.min);
max = Math.max(max, bucket.bounds.max);
sum += bucket.docCount * bucket.centroid;
- aggregations.add(bucket.getAggregations());
}
- InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
- double centroid = sum / docCount;
- Bucket.BucketBounds bounds = new Bucket.BucketBounds(min, max);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
+ final double centroid = sum / docCount;
+ final Bucket.BucketBounds bounds = new Bucket.BucketBounds(min, max);
return new Bucket(centroid, bounds, docCount, format, aggs);
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java
index f0104599396dd..33c3122e58967 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java
@@ -333,14 +333,13 @@ private Bucket createBucket(Bucket prototype, InternalAggregations aggregations,
@Override
protected Bucket reduceBucket(List<Bucket> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
- List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());
+ assert buckets.isEmpty() == false;
long docCount = 0;
for (InternalIpPrefix.Bucket bucket : buckets) {
docCount += bucket.docCount;
- aggregations.add(bucket.getAggregations());
}
- InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return createBucket(buckets.get(0), aggs, docCount);
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java
index 131be36db2956..414af918e837d 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java
@@ -292,9 +292,9 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
@Override
protected Bucket reduceBucket(List<Bucket> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
- List<InternalAggregations> aggregationsList = buckets.stream().map(bucket -> bucket.aggregations).toList();
- final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
+ assert buckets.isEmpty() == false;
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return createBucket(aggs, buckets.get(0));
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
index 046d5efb97ece..ec0ace8f3e011 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
@@ -369,14 +369,13 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
@Override
protected B reduceBucket(List<B> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
+ assert buckets.isEmpty() == false;
long docCount = 0;
- List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
for (Bucket bucket : buckets) {
docCount += bucket.docCount;
- aggregationsList.add(bucket.aggregations);
}
- final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
Bucket prototype = buckets.get(0);
return getFactory().createBucket(prototype.key, prototype.from, prototype.to, docCount, aggs, keyed, format);
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java
index ca3142a0c0797..ea3762503853e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java
@@ -88,14 +88,13 @@ public abstract static class AbstractTermsBucket extends InternalMultiBucketAggr
@Override
public B reduceBucket(List<B> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
+ assert buckets.isEmpty() == false;
long docCount = 0;
// For the per term doc count error we add up the errors from the
// shards that did not respond with the term. To do this we add up
// the errors from the shards that did respond with the terms and
// subtract that from the sum of the error from all shards
long docCountError = 0;
- List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
for (B bucket : buckets) {
docCount += bucket.getDocCount();
if (docCountError != -1) {
@@ -105,9 +104,9 @@ public B reduceBucket(List<B> buckets, AggregationReduceContext context) {
docCountError += bucket.getDocCountError();
}
}
- aggregationsList.add(bucket.getAggregations());
}
- InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return createBucket(docCount, aggs, docCountError, buckets.get(0));
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java
index f3ce541b1b8b9..b5aa8e3973c3e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java
@@ -21,7 +21,6 @@
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -149,14 +148,13 @@ public InternalAggregation reduce(List<InternalAggregation> aggregations, Aggreg
@Override
protected B reduceBucket(List<B> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
+ assert buckets.isEmpty() == false;
long docCount = 0;
- List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
for (B bucket : buckets) {
docCount += bucket.docCount;
- aggregationsList.add(bucket.aggregations);
}
- InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return createBucket(docCount, aggs, buckets.get(0));
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
index d627be186f8ff..be96683b98915 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
@@ -276,16 +276,15 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
@Override
protected B reduceBucket(List<B> buckets, AggregationReduceContext context) {
- assert buckets.size() > 0;
+ assert buckets.isEmpty() == false;
long subsetDf = 0;
long supersetDf = 0;
- List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
for (B bucket : buckets) {
subsetDf += bucket.subsetDf;
supersetDf += bucket.supersetDf;
- aggregationsList.add(bucket.aggregations);
}
- InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
+ final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets);
+ final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return createBucket(subsetDf, buckets.get(0).subsetSize, supersetDf, buckets.get(0).supersetSize, aggs, buckets.get(0));
}
diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
index 01015ec8cc78e..29cf80b75a22a 100644
--- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
@@ -210,7 +210,7 @@ static void addCollectorsAndSearch(SearchContext searchContext) throws QueryPhas
if (searcher.timeExceeded()) {
assert timeoutRunnable != null : "TimeExceededException thrown even though timeout wasn't set";
if (searchContext.request().allowPartialSearchResults() == false) {
- throw new QueryPhaseExecutionException(searchContext.shardTarget(), "Time exceeded");
+ throw new SearchTimeoutException(searchContext.shardTarget(), "Time exceeded");
}
queryResult.searchTimedOut(true);
}
diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java
index 73ec561a7ee26..106f8c82629eb 100644
--- a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java
+++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java
@@ -13,18 +13,15 @@
import org.elasticsearch.search.SearchShardTarget;
import java.io.IOException;
+import java.util.Objects;
-public class QueryPhaseExecutionException extends SearchException {
+public final class QueryPhaseExecutionException extends SearchException {
public QueryPhaseExecutionException(SearchShardTarget shardTarget, String msg, Throwable cause) {
- super(shardTarget, "Query Failed [" + msg + "]", cause);
+ super(shardTarget, "Query Failed [" + msg + "]", Objects.requireNonNull(cause, "cause cannot be null"));
}
public QueryPhaseExecutionException(StreamInput in) throws IOException {
super(in);
}
-
- public QueryPhaseExecutionException(SearchShardTarget shardTarget, String msg) {
- super(shardTarget, msg);
- }
}
diff --git a/server/src/main/java/org/elasticsearch/search/query/SearchTimeoutException.java b/server/src/main/java/org/elasticsearch/search/query/SearchTimeoutException.java
new file mode 100644
index 0000000000000..37a3b9d6b8787
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/search/query/SearchTimeoutException.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchException;
+import org.elasticsearch.search.SearchShardTarget;
+
+import java.io.IOException;
+
+/**
+ * Specific instance of {@link SearchException} that indicates that a search timeout occurred.
+ * Always returns http status 504 (Gateway Timeout)
+ */
+public class SearchTimeoutException extends SearchException {
+ public SearchTimeoutException(SearchShardTarget shardTarget, String msg) {
+ super(shardTarget, msg);
+ }
+
+ public SearchTimeoutException(StreamInput in) throws IOException {
+ super(in);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.GATEWAY_TIMEOUT;
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java
index 347bca245d144..091ce6f8a0f6d 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java
@@ -8,16 +8,35 @@
package org.elasticsearch.search.vectors;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.KnnByteVectorQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.util.Bits;
import org.elasticsearch.search.profile.query.QueryProfiler;
+import java.io.IOException;
+
public class ESKnnByteVectorQuery extends KnnByteVectorQuery implements ProfilingQuery {
+ private static final TopDocs NO_RESULTS = TopDocsCollector.EMPTY_TOPDOCS;
+
private long vectorOpsCount;
+ private final byte[] target;
public ESKnnByteVectorQuery(String field, byte[] target, int k, Query filter) {
super(field, target, k, filter);
+ this.target = target;
+ }
+
+ @Override
+ protected TopDocs approximateSearch(LeafReaderContext context, Bits acceptDocs, int visitedLimit) throws IOException {
+ // We increment visit limit by one to bypass a fencepost error in the collector
+ if (visitedLimit < Integer.MAX_VALUE) {
+ visitedLimit += 1;
+ }
+ TopDocs results = context.reader().searchNearestVectors(field, target, k, acceptDocs, visitedLimit);
+ return results != null ? results : NO_RESULTS;
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java
index e83a90a3c4df8..4fa4db1f4ea95 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java
@@ -8,16 +8,24 @@
package org.elasticsearch.search.vectors;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.KnnFloatVectorQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.util.Bits;
import org.elasticsearch.search.profile.query.QueryProfiler;
+import java.io.IOException;
+
public class ESKnnFloatVectorQuery extends KnnFloatVectorQuery implements ProfilingQuery {
+ private static final TopDocs NO_RESULTS = TopDocsCollector.EMPTY_TOPDOCS;
private long vectorOpsCount;
+ private final float[] target;
public ESKnnFloatVectorQuery(String field, float[] target, int k, Query filter) {
super(field, target, k, filter);
+ this.target = target;
}
@Override
@@ -27,6 +35,16 @@ protected TopDocs mergeLeafResults(TopDocs[] perLeafResults) {
return topK;
}
+ @Override
+ protected TopDocs approximateSearch(LeafReaderContext context, Bits acceptDocs, int visitedLimit) throws IOException {
+ // We increment visit limit by one to bypass a fencepost error in the collector
+ if (visitedLimit < Integer.MAX_VALUE) {
+ visitedLimit += 1;
+ }
+ TopDocs results = context.reader().searchNearestVectors(field, target, k, acceptDocs, visitedLimit);
+ return results != null ? results : NO_RESULTS;
+ }
+
@Override
public void profile(QueryProfiler queryProfiler) {
queryProfiler.setVectorOpsCount(vectorOpsCount);
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java
index 96b06e8b49f2d..0b460b5cb2fb7 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java
@@ -8,29 +8,39 @@
package org.elasticsearch.snapshots;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.RepositoriesMetadata;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.health.Diagnosis;
-import org.elasticsearch.health.Diagnosis.Resource.Type;
import org.elasticsearch.health.HealthIndicatorDetails;
import org.elasticsearch.health.HealthIndicatorImpact;
import org.elasticsearch.health.HealthIndicatorResult;
import org.elasticsearch.health.HealthIndicatorService;
+import org.elasticsearch.health.HealthStatus;
import org.elasticsearch.health.ImpactArea;
import org.elasticsearch.health.SimpleHealthIndicatorDetails;
import org.elasticsearch.health.node.HealthInfo;
+import org.elasticsearch.health.node.RepositoriesHealthInfo;
import org.elasticsearch.repositories.RepositoryData;
-import java.util.Collections;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
-import static org.elasticsearch.common.Strings.collectionToDelimitedStringWithLimit;
+import static org.elasticsearch.cluster.node.DiscoveryNode.DISCOVERY_NODE_COMPARATOR;
import static org.elasticsearch.common.util.CollectionUtils.limitSize;
+import static org.elasticsearch.health.Diagnosis.Resource.Type.SNAPSHOT_REPOSITORY;
import static org.elasticsearch.health.HealthStatus.GREEN;
-import static org.elasticsearch.health.HealthStatus.RED;
+import static org.elasticsearch.health.HealthStatus.UNKNOWN;
+import static org.elasticsearch.health.HealthStatus.YELLOW;
/**
* This indicator reports health for snapshot repositories.
@@ -44,11 +54,22 @@ public class RepositoryIntegrityHealthIndicatorService implements HealthIndicato
public static final String NAME = "repository_integrity";
- public static final String HELP_URL = "https://ela.st/fix-repository-integrity";
+ private static final String HELP_URL = "https://ela.st/fix-repository-integrity";
- public static final String REPOSITORY_CORRUPTED_IMPACT_ID = "repository_corruption";
+ public static final String NO_REPOS_CONFIGURED = "No snapshot repositories configured.";
+ public static final String ALL_REPOS_HEALTHY = "All repositories are healthy.";
+ public static final String NO_REPO_HEALTH_INFO = "No repository health info.";
- public static final Diagnosis.Definition CORRUPTED_REPOSITORY = new Diagnosis.Definition(
+ public static final List<HealthIndicatorImpact> IMPACTS = List.of(
+ new HealthIndicatorImpact(
+ NAME,
+ "backups_at_risk",
+ 2,
+ "Data in the affected snapshot repositories may be lost and cannot be restored.",
+ List.of(ImpactArea.BACKUP)
+ )
+ );
+ public static final Diagnosis.Definition CORRUPTED_DEFINITION = new Diagnosis.Definition(
NAME,
"corrupt_repo_integrity",
"Multiple clusters are writing to the same repository.",
@@ -56,9 +77,22 @@ public class RepositoryIntegrityHealthIndicatorService implements HealthIndicato
+ " to this cluster.",
HELP_URL
);
-
- public static final String NO_REPOS_CONFIGURED = "No snapshot repositories configured.";
- public static final String NO_CORRUPT_REPOS = "No corrupted snapshot repositories.";
+ public static final Diagnosis.Definition UNKNOWN_DEFINITION = new Diagnosis.Definition(
+ NAME,
+ "unknown_repository",
+ "The repository uses an unknown type.",
+ "Ensure that all required plugins are installed on the affected nodes.",
+ HELP_URL
+ );
+ public static final Diagnosis.Definition INVALID_DEFINITION = new Diagnosis.Definition(
+ NAME,
+ "invalid_repository",
+ "An exception occurred while trying to initialize the repository.",
+ """
+ Make sure all nodes in the cluster are in sync with each other.\
+ Refer to the nodes’ logs for detailed information on why the repository initialization failed.""",
+ HELP_URL
+ );
private final ClusterService clusterService;
@@ -73,78 +107,162 @@ public String name() {
@Override
public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) {
+ var clusterState = clusterService.state();
var snapshotMetadata = RepositoriesMetadata.get(clusterService.state());
- if (snapshotMetadata.repositories().isEmpty()) {
- return createIndicator(
- GREEN,
- NO_REPOS_CONFIGURED,
- HealthIndicatorDetails.EMPTY,
- Collections.emptyList(),
- Collections.emptyList()
- );
+ var repositories = snapshotMetadata.repositories();
+ if (repositories.isEmpty()) {
+ return createIndicator(GREEN, NO_REPOS_CONFIGURED, HealthIndicatorDetails.EMPTY, List.of(), List.of());
}
- var corrupted = snapshotMetadata.repositories()
- .stream()
- .filter(repository -> repository.generation() == RepositoryData.CORRUPTED_REPO_GEN)
- .map(RepositoryMetadata::name)
- .toList();
-
- var totalRepositories = snapshotMetadata.repositories().size();
- var corruptedRepositories = corrupted.size();
-
- if (corrupted.isEmpty()) {
- return createIndicator(
- GREEN,
- "No corrupted snapshot repositories.",
- verbose ? new SimpleHealthIndicatorDetails(Map.of("total_repositories", totalRepositories)) : HealthIndicatorDetails.EMPTY,
- Collections.emptyList(),
- Collections.emptyList()
- );
- }
- List<HealthIndicatorImpact> impacts = Collections.singletonList(
- new HealthIndicatorImpact(
- NAME,
- REPOSITORY_CORRUPTED_IMPACT_ID,
- 1,
- String.format(
- Locale.ROOT,
- "Data in corrupted snapshot repositor%s %s may be lost and cannot be restored.",
- corrupted.size() > 1 ? "ies" : "y",
- limitSize(corrupted, 10)
- ),
- List.of(ImpactArea.BACKUP)
- )
- );
+ var repositoryHealthAnalyzer = new RepositoryHealthAnalyzer(clusterState, repositories, healthInfo.repositoriesInfoByNode());
return createIndicator(
- RED,
- createCorruptedRepositorySummary(corrupted),
- verbose
- ? new SimpleHealthIndicatorDetails(
- Map.of(
- "total_repositories",
- totalRepositories,
- "corrupted_repositories",
- corruptedRepositories,
- "corrupted",
- limitSize(corrupted, 10)
- )
- )
- : HealthIndicatorDetails.EMPTY,
- impacts,
- List.of(
- new Diagnosis(
- CORRUPTED_REPOSITORY,
- List.of(new Diagnosis.Resource(Type.SNAPSHOT_REPOSITORY, limitSize(corrupted, maxAffectedResourcesCount)))
- )
- )
+ repositoryHealthAnalyzer.getHealthStatus(),
+ repositoryHealthAnalyzer.getSymptom(),
+ repositoryHealthAnalyzer.getDetails(verbose),
+ repositoryHealthAnalyzer.getImpacts(),
+ repositoryHealthAnalyzer.getDiagnoses(maxAffectedResourcesCount)
);
}
- private static String createCorruptedRepositorySummary(List<String> corrupted) {
- var message = new StringBuilder().append("Detected [").append(corrupted.size()).append("] corrupted snapshot repositories: ");
- collectionToDelimitedStringWithLimit(corrupted, ",", "[", "].", 1024, message);
- return message.toString();
+ /**
+ * Analyzer for the cluster's repositories health; aids in constructing a {@link HealthIndicatorResult}.
+ */
+ static class RepositoryHealthAnalyzer {
+ private final ClusterState clusterState;
+ private final int totalRepositories;
+ private final List<String> corruptedRepositories;
+ private final Set<String> unknownRepositories = new HashSet<>();
+ private final Set<String> nodesWithUnknownRepos = new HashSet<>();
+ private final Set<String> invalidRepositories = new HashSet<>();
+ private final Set<String> nodesWithInvalidRepos = new HashSet<>();
+ private final HealthStatus healthStatus;
+
+ private RepositoryHealthAnalyzer(
+ ClusterState clusterState,
+ List<RepositoryMetadata> repositories,
+ Map<String, RepositoriesHealthInfo> repositoriesHealthByNode
+ ) {
+ this.clusterState = clusterState;
+ this.totalRepositories = repositories.size();
+ this.corruptedRepositories = repositories.stream()
+ .filter(repository -> repository.generation() == RepositoryData.CORRUPTED_REPO_GEN)
+ .map(RepositoryMetadata::name)
+ .sorted()
+ .toList();
+
+ repositoriesHealthByNode.forEach((nodeId, healthInfo) -> {
+ unknownRepositories.addAll(healthInfo.unknownRepositories());
+ if (healthInfo.unknownRepositories().isEmpty() == false) {
+ nodesWithUnknownRepos.add(nodeId);
+ }
+ invalidRepositories.addAll(healthInfo.invalidRepositories());
+ if (healthInfo.invalidRepositories().isEmpty() == false) {
+ nodesWithInvalidRepos.add(nodeId);
+ }
+ });
+
+ if (corruptedRepositories.isEmpty() == false
+ || unknownRepositories.isEmpty() == false
+ || invalidRepositories.isEmpty() == false) {
+ healthStatus = YELLOW;
+ } else if (repositoriesHealthByNode.isEmpty()) {
+ healthStatus = UNKNOWN;
+ } else {
+ healthStatus = GREEN;
+ }
+ }
+
+ public HealthStatus getHealthStatus() {
+ return healthStatus;
+ }
+
+ public String getSymptom() {
+ if (healthStatus == GREEN) {
+ return ALL_REPOS_HEALTHY;
+ } else if (healthStatus == UNKNOWN) {
+ return NO_REPO_HEALTH_INFO;
+ }
+
+ return "Detected "
+ + Stream.of(
+ generateSymptomString("corrupted", corruptedRepositories.size()),
+ generateSymptomString("unknown", unknownRepositories.size()),
+ generateSymptomString("invalid", invalidRepositories.size())
+ ).filter(Objects::nonNull).collect(Collectors.joining(", and "))
+ + ".";
+ }
+
+ private static String generateSymptomString(String type, long size) {
+ if (size == 0) {
+ return null;
+ }
+
+ return String.format(Locale.ROOT, "[%d] %s snapshot repositor%s", size, type, size > 1 ? "ies" : "y");
+ }
+
+ public HealthIndicatorDetails getDetails(boolean verbose) {
+ if (verbose == false) {
+ return HealthIndicatorDetails.EMPTY;
+ }
+ Map<String, Object> map = new HashMap<>();
+ map.put("total_repositories", totalRepositories);
+
+ if (healthStatus != GREEN) {
+ map.put("corrupted_repositories", corruptedRepositories.size());
+ map.put("corrupted", limitSize(corruptedRepositories, 10));
+
+ if (healthStatus != UNKNOWN) {
+ map.put("unknown_repositories", unknownRepositories.size());
+ map.put("invalid_repositories", invalidRepositories.size());
+ }
+ }
+
+ return new SimpleHealthIndicatorDetails(map);
+ }
+
+ public List<HealthIndicatorImpact> getImpacts() {
+ if (healthStatus == GREEN || healthStatus == UNKNOWN) {
+ return List.of();
+ }
+ return IMPACTS;
+ }
+
+ public List<Diagnosis> getDiagnoses(int maxAffectedResourcesCount) {
+ var diagnoses = new ArrayList<Diagnosis>();
+ if (corruptedRepositories.isEmpty() == false) {
+ diagnoses.add(
+ new Diagnosis(
+ CORRUPTED_DEFINITION,
+ List.of(new Diagnosis.Resource(SNAPSHOT_REPOSITORY, limitSize(corruptedRepositories, maxAffectedResourcesCount)))
+ )
+ );
+ }
+ if (unknownRepositories.size() > 0) {
+ diagnoses.add(createDiagnosis(UNKNOWN_DEFINITION, unknownRepositories, nodesWithUnknownRepos, maxAffectedResourcesCount));
+ }
+ if (invalidRepositories.size() > 0) {
+ diagnoses.add(createDiagnosis(INVALID_DEFINITION, invalidRepositories, nodesWithInvalidRepos, maxAffectedResourcesCount));
+ }
+ return diagnoses;
+ }
+
+ private Diagnosis createDiagnosis(
+ Diagnosis.Definition definition,
+ Set<String> repos,
+ Set<String> nodes,
+ int maxAffectedResourcesCount
+ ) {
+ var reposView = repos.stream().sorted().limit(maxAffectedResourcesCount).toList();
+ var nodesView = nodes.stream()
+ .map(nodeId -> clusterState.nodes().get(nodeId))
+ .sorted(DISCOVERY_NODE_COMPARATOR)
+ .limit(maxAffectedResourcesCount)
+ .toList();
+ return new Diagnosis(
+ definition,
+ List.of(new Diagnosis.Resource(SNAPSHOT_REPOSITORY, reposView), new Diagnosis.Resource(nodesView))
+ );
+ }
}
}
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index 6f6d62d7677d8..9ac76e653b640 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -974,10 +974,10 @@ static void validateSnapshotRestorable(
if (IndexVersion.current().before(snapshotInfo.version())) {
throw new SnapshotRestoreException(
new Snapshot(repository.name(), snapshotInfo.snapshotId()),
- "the snapshot was created with index version ["
- + snapshotInfo.version()
- + "] which is higher than the version used by this node ["
- + IndexVersion.current()
+ "the snapshot was created with version ["
+ + snapshotInfo.version().toReleaseVersion()
+ + "] which is higher than the version of this node ["
+ + IndexVersion.current().toReleaseVersion()
+ "]"
);
}
diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.KnnVectorsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.KnnVectorsFormat
new file mode 100644
index 0000000000000..ff848275f2ba1
--- /dev/null
+++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.KnnVectorsFormat
@@ -0,0 +1,2 @@
+org.elasticsearch.index.codec.vectors.ES813FlatVectorFormat
+org.elasticsearch.index.codec.vectors.ES813Int8FlatVectorFormat
diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
index ad2c89d18b70a..8efe3b01eefd4 100644
--- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
@@ -65,6 +65,7 @@
7.17.15,7171599
7.17.16,7171699
7.17.17,7171799
+7.17.18,7171899
8.0.0,8000099
8.0.1,8000199
8.1.0,8010099
@@ -109,3 +110,4 @@
8.11.3,8512001
8.11.4,8512001
8.12.0,8560000
+8.12.1,8560001
diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
index 644cc362d3d4c..43220565ab871 100644
--- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
@@ -65,6 +65,7 @@
7.17.15,7171599
7.17.16,7171699
7.17.17,7171799
+7.17.18,7171899
8.0.0,8000099
8.0.1,8000199
8.1.0,8010099
@@ -109,3 +110,4 @@
8.11.3,8500003
8.11.4,8500003
8.12.0,8500008
+8.12.1,8500010
diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
index 3e0d9193ffed9..134480eb839d3 100644
--- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
+++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
@@ -80,6 +80,7 @@
import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.search.aggregations.UnsupportedAggregationOnDownsampledIndex;
import org.elasticsearch.search.internal.ShardSearchContextId;
+import org.elasticsearch.search.query.SearchTimeoutException;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotException;
import org.elasticsearch.snapshots.SnapshotId;
@@ -827,6 +828,7 @@ public void testIds() {
ids.put(173, TooManyScrollContextsException.class);
ids.put(174, AggregationExecutionException.InvalidPath.class);
ids.put(175, AutoscalingMissedIndicesUpdateException.class);
+ ids.put(176, SearchTimeoutException.class);
Map<Class<? extends ElasticsearchException>, Integer> reverse = new HashMap<>();
for (Map.Entry<Integer, Class<? extends ElasticsearchException>> entry : ids.entrySet()) {
diff --git a/server/src/test/java/org/elasticsearch/ReleaseVersionsTests.java b/server/src/test/java/org/elasticsearch/ReleaseVersionsTests.java
index 4ed262da07407..cee43954e9ce7 100644
--- a/server/src/test/java/org/elasticsearch/ReleaseVersionsTests.java
+++ b/server/src/test/java/org/elasticsearch/ReleaseVersionsTests.java
@@ -29,7 +29,7 @@ public void testReturnsRange() {
IntFunction<String> versions = ReleaseVersions.generateVersionsLookup(ReleaseVersionsTests.class);
assertThat(versions.apply(17), equalTo("8.1.2-8.2.0"));
- expectThrows(AssertionError.class, () -> versions.apply(9));
+ assertThat(versions.apply(9), equalTo("0.0.0-8.0.0"));
assertThat(versions.apply(24), equalTo("8.2.2-snapshot[24]"));
}
}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
index e65d99c64ae5e..0290bfb9c236f 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
@@ -491,8 +491,8 @@ public void testChunking() {
}
private static int expectedChunks(NodeStats nodeStats, NodeStatsLevel level) {
- // expectedChunks = number of static chunks (8 at the moment, see NodeStats#toXContentChunked) + number of variable chunks
- return 8 + expectedChunks(nodeStats.getHttp()) //
+ return 7 // number of static chunks, see NodeStats#toXContentChunked
+ + expectedChunks(nodeStats.getHttp()) //
+ expectedChunks(nodeStats.getIndices(), level) //
+ expectedChunks(nodeStats.getTransport()) //
+ expectedChunks(nodeStats.getIngestStats()) //
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestBuilderTests.java
index 8843801e528a3..27b1104163d67 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestBuilderTests.java
@@ -10,8 +10,6 @@
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.support.WriteRequest;
-import org.elasticsearch.core.TimeValue;
import org.elasticsearch.test.ESTestCase;
public class BulkRequestBuilderTests extends ESTestCase {
@@ -21,17 +19,5 @@ public void testValidation() {
bulkRequestBuilder.add(new IndexRequestBuilder(null, randomAlphaOfLength(10)));
bulkRequestBuilder.add(new IndexRequest());
expectThrows(IllegalStateException.class, bulkRequestBuilder::request);
-
- bulkRequestBuilder = new BulkRequestBuilder(null, null);
- bulkRequestBuilder.add(new IndexRequestBuilder(null, randomAlphaOfLength(10)));
- bulkRequestBuilder.setTimeout(randomTimeValue());
- bulkRequestBuilder.setTimeout(TimeValue.timeValueSeconds(randomIntBetween(1, 30)));
- expectThrows(IllegalStateException.class, bulkRequestBuilder::request);
-
- bulkRequestBuilder = new BulkRequestBuilder(null, null);
- bulkRequestBuilder.add(new IndexRequestBuilder(null, randomAlphaOfLength(10)));
- bulkRequestBuilder.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values()).getValue());
- bulkRequestBuilder.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values()));
- expectThrows(IllegalStateException.class, bulkRequestBuilder::request);
}
}
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentTests.java b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentTests.java
new file mode 100644
index 0000000000000..962c796e18c2a
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentTests.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.RemoteTransportException;
+import org.elasticsearch.xcontent.ObjectPath;
+import org.elasticsearch.xcontent.json.JsonXContent;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.CoreMatchers.startsWith;
+
+public class FailureStoreDocumentTests extends ESTestCase {
+
+ public void testFailureStoreDocumentConversion() throws Exception {
+ IndexRequest source = new IndexRequest("original_index").routing("fake_routing")
+ .id("1")
+ .source(JsonXContent.contentBuilder().startObject().field("key", "value").endObject());
+
+ // The exception will be wrapped for the test to make sure the converter correctly unwraps it
+ Exception exception = new ElasticsearchException("Test exception please ignore");
+ exception = new RemoteTransportException("Test exception wrapper, please ignore", exception);
+
+ String targetIndexName = "rerouted_index";
+ long testTime = 1702357200000L; // 2023-12-12T05:00:00.000Z
+
+ IndexRequest convertedRequest = FailureStoreDocument.transformFailedRequest(source, exception, targetIndexName, () -> testTime);
+
+ // Retargeting write
+ assertThat(convertedRequest.id(), is(nullValue()));
+ assertThat(convertedRequest.routing(), is(nullValue()));
+ assertThat(convertedRequest.index(), is(equalTo(targetIndexName)));
+ assertThat(convertedRequest.opType(), is(DocWriteRequest.OpType.CREATE));
+
+ // Original document content is no longer in the same place
+ assertThat("Expected original document to be modified", convertedRequest.sourceAsMap().get("key"), is(nullValue()));
+
+ // Assert document contents
+ assertThat(ObjectPath.eval("@timestamp", convertedRequest.sourceAsMap()), is(equalTo("2023-12-12T05:00:00.000Z")));
+
+ assertThat(ObjectPath.eval("document.id", convertedRequest.sourceAsMap()), is(equalTo("1")));
+ assertThat(ObjectPath.eval("document.routing", convertedRequest.sourceAsMap()), is(equalTo("fake_routing")));
+ assertThat(ObjectPath.eval("document.index", convertedRequest.sourceAsMap()), is(equalTo("original_index")));
+ assertThat(ObjectPath.eval("document.source.key", convertedRequest.sourceAsMap()), is(equalTo("value")));
+
+ assertThat(ObjectPath.eval("error.type", convertedRequest.sourceAsMap()), is(equalTo("exception")));
+ assertThat(ObjectPath.eval("error.message", convertedRequest.sourceAsMap()), is(equalTo("Test exception please ignore")));
+ assertThat(
+ ObjectPath.eval("error.stack_trace", convertedRequest.sourceAsMap()),
+ startsWith("org.elasticsearch.ElasticsearchException: Test exception please ignore")
+ );
+ assertThat(
+ ObjectPath.eval("error.stack_trace", convertedRequest.sourceAsMap()),
+ containsString("at org.elasticsearch.action.bulk.FailureStoreDocumentTests.testFailureStoreDocumentConverstion")
+ );
+
+ assertThat(convertedRequest.isWriteToFailureStore(), is(true));
+ }
+}
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
index f90289c26e3a2..988a92352649a 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
@@ -15,6 +15,7 @@
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ActionTestUtils;
import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -25,6 +26,7 @@
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.features.FeatureService;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexingPressure;
import org.elasticsearch.index.VersionType;
@@ -41,6 +43,7 @@
import java.util.function.Function;
import static java.util.Collections.emptySet;
+import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -114,12 +117,15 @@ public boolean hasIndexAbstraction(String indexAbstraction, ClusterState state)
final ThreadPool threadPool = mock(ThreadPool.class);
TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(threadPool);
+ FeatureService mockFeatureService = mock(FeatureService.class);
+ when(mockFeatureService.clusterHasFeature(any(), any())).thenReturn(true);
TransportBulkAction action = new TransportBulkAction(
threadPool,
transportService,
clusterService,
null,
- null,
+ mockFeatureService,
+ new NodeClient(Settings.EMPTY, threadPool),
mock(ActionFilters.class),
indexNameExpressionResolver,
new IndexingPressure(Settings.EMPTY),
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
index 43eadbc873012..2d6492e4e73a4 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
@@ -18,6 +18,7 @@
import org.elasticsearch.action.support.ActionTestUtils;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateApplier;
@@ -31,10 +32,12 @@
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.TriConsumer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.features.FeatureService;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
@@ -54,13 +57,16 @@
import org.mockito.Captor;
import org.mockito.MockitoAnnotations;
+import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
+import java.util.function.Predicate;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.sameInstance;
@@ -82,10 +88,12 @@ public class TransportBulkActionIngestTests extends ESTestCase {
*/
private static final String WITH_DEFAULT_PIPELINE = "index_with_default_pipeline";
private static final String WITH_DEFAULT_PIPELINE_ALIAS = "alias_for_index_with_default_pipeline";
+ private static final String WITH_FAILURE_STORE_ENABLED = "data-stream-failure-store-enabled";
private static final Settings SETTINGS = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), true).build();
private static final Thread DUMMY_WRITE_THREAD = new Thread(ThreadPool.Names.WRITE);
+ private FeatureService mockFeatureService;
/** Services needed by bulk action */
TransportService transportService;
@@ -95,6 +103,10 @@ public class TransportBulkActionIngestTests extends ESTestCase {
/** Arguments to callbacks we want to capture, but which require generics, so we must use @Captor */
@Captor
+ ArgumentCaptor<Predicate<String>> redirectPredicate;
+ @Captor
+ ArgumentCaptor<TriConsumer<Integer, String, IOException>> redirectHandler;
+ @Captor
ArgumentCaptor<BiConsumer<Integer, Exception>> failureHandler;
@Captor
ArgumentCaptor<BiConsumer<Thread, Exception>> completionHandler;
@@ -131,7 +143,8 @@ class TestTransportBulkAction extends TransportBulkAction {
transportService,
clusterService,
ingestService,
- null,
+ mockFeatureService,
+ new NodeClient(Settings.EMPTY, threadPool),
new ActionFilters(Collections.emptySet()),
TestIndexNameExpressionResolver.newInstance(),
new IndexingPressure(SETTINGS),
@@ -176,7 +189,7 @@ class TestSingleItemBulkWriteAction extends TransportSingleItemBulkWriteAction<
Iterator<DocWriteRequest<?>> req = bulkDocsItr.getValue().iterator();
failureHandler.getValue().accept(0, exception); // have an exception for our one index request
indexRequest2.setPipeline(IngestService.NOOP_PIPELINE_NAME); // this is done by the real pipeline execution service when processing
- completionHandler.getValue().accept(DUMMY_WRITE_THREAD, null);
+ assertTrue(redirectPredicate.getValue().test(WITH_FAILURE_STORE_ENABLED + "-1")); // ensure redirects on failure store data stream
+ assertFalse(redirectPredicate.getValue().test(WITH_DEFAULT_PIPELINE)); // no redirects for random existing indices
+ assertFalse(redirectPredicate.getValue().test("index")); // no redirects for non-existant indices with no templates
+ redirectHandler.getValue().apply(2, WITH_FAILURE_STORE_ENABLED + "-1", exception); // exception and redirect for request 3 (slot 2)
+ completionHandler.getValue().accept(DUMMY_WRITE_THREAD, null); // all ingestion completed
assertTrue(action.isExecuted);
assertFalse(responseCalled.get()); // listener would only be called by real index action, not our mocked one
verifyNoMoreInteractions(transportService);
@@ -324,6 +358,8 @@ public void testSingleItemBulkActionIngestLocal() throws Exception {
eq(1),
bulkDocsItr.capture(),
any(),
+ any(),
+ any(),
failureHandler.capture(),
completionHandler.capture(),
eq(Names.WRITE)
@@ -370,6 +406,8 @@ public void testIngestSystemLocal() throws Exception {
eq(bulkRequest.numberOfActions()),
bulkDocsItr.capture(),
any(),
+ any(),
+ any(),
failureHandler.capture(),
completionHandler.capture(),
eq(Names.SYSTEM_WRITE)
@@ -403,7 +441,7 @@ public void testIngestForward() throws Exception {
ActionTestUtils.execute(action, null, bulkRequest, listener);
// should not have executed ingest locally
- verify(ingestService, never()).executeBulkRequest(anyInt(), any(), any(), any(), any(), any());
+ verify(ingestService, never()).executeBulkRequest(anyInt(), any(), any(), any(), any(), any(), any(), any());
// but instead should have sent to a remote node with the transport service
ArgumentCaptor node = ArgumentCaptor.forClass(DiscoveryNode.class);
verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture());
@@ -443,7 +481,7 @@ public void testSingleItemBulkActionIngestForward() throws Exception {
ActionTestUtils.execute(singleItemBulkWriteAction, null, indexRequest, listener);
// should not have executed ingest locally
- verify(ingestService, never()).executeBulkRequest(anyInt(), any(), any(), any(), any(), any());
+ verify(ingestService, never()).executeBulkRequest(anyInt(), any(), any(), any(), any(), any(), any(), any());
// but instead should have sent to a remote node with the transport service
ArgumentCaptor node = ArgumentCaptor.forClass(DiscoveryNode.class);
verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture());
@@ -527,6 +565,8 @@ private void validatePipelineWithBulkUpsert(@Nullable String indexRequestIndexNa
eq(bulkRequest.numberOfActions()),
bulkDocsItr.capture(),
any(),
+ any(),
+ any(),
failureHandler.capture(),
completionHandler.capture(),
eq(Names.WRITE)
@@ -575,6 +615,8 @@ public void testDoExecuteCalledTwiceCorrectly() throws Exception {
eq(1),
bulkDocsItr.capture(),
any(),
+ any(),
+ any(),
failureHandler.capture(),
completionHandler.capture(),
eq(Names.WRITE)
@@ -669,6 +711,8 @@ public void testFindDefaultPipelineFromTemplateMatch() {
eq(1),
bulkDocsItr.capture(),
any(),
+ any(),
+ any(),
failureHandler.capture(),
completionHandler.capture(),
eq(Names.WRITE)
@@ -707,6 +751,8 @@ public void testFindDefaultPipelineFromV2TemplateMatch() {
eq(1),
bulkDocsItr.capture(),
any(),
+ any(),
+ any(),
failureHandler.capture(),
completionHandler.capture(),
eq(Names.WRITE)
@@ -734,6 +780,8 @@ public void testIngestCallbackExceptionHandled() throws Exception {
eq(bulkRequest.numberOfActions()),
bulkDocsItr.capture(),
any(),
+ any(),
+ any(),
failureHandler.capture(),
completionHandler.capture(),
eq(Names.WRITE)
@@ -771,6 +819,8 @@ private void validateDefaultPipeline(IndexRequest indexRequest) {
eq(1),
bulkDocsItr.capture(),
any(),
+ any(),
+ any(),
failureHandler.capture(),
completionHandler.capture(),
eq(Names.WRITE)
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
index 1af1ef32aa8b1..ad522e36f9bd9 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
@@ -20,8 +20,11 @@
import org.elasticsearch.action.support.ActionTestUtils;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
import org.elasticsearch.cluster.metadata.IndexAbstraction;
import org.elasticsearch.cluster.metadata.IndexAbstraction.ConcreteIndex;
import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -32,6 +35,7 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.features.FeatureService;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
@@ -52,6 +56,7 @@
import java.util.Collections;
import java.util.List;
+import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
@@ -61,6 +66,10 @@
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
+import static org.junit.Assume.assumeThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
public class TransportBulkActionTests extends ESTestCase {
@@ -70,6 +79,7 @@ public class TransportBulkActionTests extends ESTestCase {
private TestThreadPool threadPool;
private TestTransportBulkAction bulkAction;
+ private FeatureService mockFeatureService;
class TestTransportBulkAction extends TransportBulkAction {
@@ -83,7 +93,8 @@ class TestTransportBulkAction extends TransportBulkAction {
transportService,
clusterService,
null,
- null,
+ mockFeatureService,
+ new NodeClient(Settings.EMPTY, TransportBulkActionTests.this.threadPool),
new ActionFilters(Collections.emptySet()),
new Resolver(),
new IndexingPressure(Settings.EMPTY),
@@ -130,6 +141,8 @@ public void setUp() throws Exception {
);
transportService.start();
transportService.acceptIncomingRequests();
+ mockFeatureService = mock(FeatureService.class);
+ when(mockFeatureService.clusterHasFeature(any(), any())).thenReturn(true);
bulkAction = new TestTransportBulkAction();
}
@@ -338,6 +351,100 @@ public void testRejectionAfterCreateIndexIsPropagated() throws Exception {
}
}
+ public void testResolveFailureStoreFromMetadata() throws Exception {
+ assumeThat(DataStream.isFailureStoreEnabled(), is(true));
+
+ String dataStreamWithFailureStore = "test-data-stream-failure-enabled";
+ String dataStreamWithoutFailureStore = "test-data-stream-failure-disabled";
+ long testTime = randomMillisUpToYear9999();
+
+ IndexMetadata backingIndex1 = DataStreamTestHelper.createFirstBackingIndex(dataStreamWithFailureStore, testTime).build();
+ IndexMetadata backingIndex2 = DataStreamTestHelper.createFirstBackingIndex(dataStreamWithoutFailureStore, testTime).build();
+ IndexMetadata failureStoreIndex1 = DataStreamTestHelper.createFirstFailureStore(dataStreamWithFailureStore, testTime).build();
+
+ Metadata metadata = Metadata.builder()
+ .dataStreams(
+ Map.of(
+ dataStreamWithFailureStore,
+ DataStreamTestHelper.newInstance(
+ dataStreamWithFailureStore,
+ List.of(backingIndex1.getIndex()),
+ 1L,
+ Map.of(),
+ false,
+ null,
+ List.of(failureStoreIndex1.getIndex())
+ ),
+ dataStreamWithoutFailureStore,
+ DataStreamTestHelper.newInstance(
+ dataStreamWithoutFailureStore,
+ List.of(backingIndex2.getIndex()),
+ 1L,
+ Map.of(),
+ false,
+ null,
+ List.of()
+ )
+ ),
+ Map.of()
+ )
+ .indices(
+ Map.of(
+ backingIndex1.getIndex().getName(),
+ backingIndex1,
+ backingIndex2.getIndex().getName(),
+ backingIndex2,
+ failureStoreIndex1.getIndex().getName(),
+ failureStoreIndex1
+ )
+ )
+ .build();
+
+ // Data stream with failure store should store failures
+ assertThat(TransportBulkAction.shouldStoreFailure(dataStreamWithFailureStore, metadata, testTime), is(true));
+ // Data stream without failure store should not
+ assertThat(TransportBulkAction.shouldStoreFailure(dataStreamWithoutFailureStore, metadata, testTime), is(false));
+ // An index should not be considered for failure storage
+ assertThat(TransportBulkAction.shouldStoreFailure(backingIndex1.getIndex().getName(), metadata, testTime), is(false));
+ // even if that index is itself a failure store
+ assertThat(TransportBulkAction.shouldStoreFailure(failureStoreIndex1.getIndex().getName(), metadata, testTime), is(false));
+ }
+
+ public void testResolveFailureStoreFromTemplate() throws Exception {
+ assumeThat(DataStream.isFailureStoreEnabled(), is(true));
+
+ String dsTemplateWithFailureStore = "test-data-stream-failure-enabled";
+ String dsTemplateWithoutFailureStore = "test-data-stream-failure-disabled";
+ String indexTemplate = "test-index";
+ long testTime = randomMillisUpToYear9999();
+
+ Metadata metadata = Metadata.builder()
+ .indexTemplates(
+ Map.of(
+ dsTemplateWithFailureStore,
+ ComposableIndexTemplate.builder()
+ .indexPatterns(List.of(dsTemplateWithFailureStore + "-*"))
+ .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true))
+ .build(),
+ dsTemplateWithoutFailureStore,
+ ComposableIndexTemplate.builder()
+ .indexPatterns(List.of(dsTemplateWithoutFailureStore + "-*"))
+ .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, false))
+ .build(),
+ indexTemplate,
+ ComposableIndexTemplate.builder().indexPatterns(List.of(indexTemplate + "-*")).build()
+ )
+ )
+ .build();
+
+ // Data stream with failure store should store failures
+ assertThat(TransportBulkAction.shouldStoreFailure(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true));
+ // Data stream without failure store should not
+ assertThat(TransportBulkAction.shouldStoreFailure(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false));
+ // An index template should not be considered for failure storage
+ assertThat(TransportBulkAction.shouldStoreFailure(indexTemplate + "-1", metadata, testTime), is(false));
+ }
+
private BulkRequest buildBulkRequest(List<String> indices) {
BulkRequest request = new BulkRequest();
for (String index : indices) {
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
index db2e5ca02c0ae..a2e54a1c7c3b8 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
@@ -247,6 +247,7 @@ static class TestTransportBulkAction extends TransportBulkAction {
transportService,
clusterService,
null,
+ null,
client,
actionFilters,
indexNameExpressionResolver,
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java
index 0a3adaf54a8ea..2657bdef8c09d 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java
@@ -15,6 +15,7 @@
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.ingest.SimulateIndexResponse;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeUtils;
import org.elasticsearch.cluster.service.ClusterService;
@@ -74,6 +75,7 @@ class TestTransportSimulateBulkAction extends TransportSimulateBulkAction {
clusterService,
null,
null,
+ new NodeClient(Settings.EMPTY, TransportSimulateBulkActionTests.this.threadPool),
new ActionFilters(Collections.emptySet()),
new TransportBulkActionTookTests.Resolver(),
new IndexingPressure(Settings.EMPTY),
diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
index df8aa6ce07b61..327f31a247c30 100644
--- a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
@@ -14,6 +14,7 @@
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.replication.ReplicationResponse;
+import org.elasticsearch.cluster.metadata.DataStreamAlias;
import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.bytes.BytesArray;
@@ -440,6 +441,25 @@ public void testGetConcreteWriteIndex() {
equalTo("Error get data stream timestamp field: timestamp [10.0] type [class java.lang.Double] error")
);
}
+
+ {
+ // Alias to time series data stream
+ DataStreamAlias alias = new DataStreamAlias("my-alias", List.of(tsdbDataStream), tsdbDataStream, null);
+ var metadataBuilder3 = Metadata.builder(metadata);
+ metadataBuilder3.put(alias.getName(), tsdbDataStream, true, null);
+ var metadata3 = metadataBuilder3.build();
+ IndexRequest request = new IndexRequest(alias.getName());
+ request.opType(DocWriteRequest.OpType.CREATE);
+ request.source(renderSource(source, start1), XContentType.JSON);
+ var result = request.getConcreteWriteIndex(metadata3.getIndicesLookup().get(alias.getName()), metadata3);
+ assertThat(result, equalTo(metadata3.dataStreams().get(tsdbDataStream).getIndices().get(0)));
+
+ request = new IndexRequest(alias.getName());
+ request.opType(DocWriteRequest.OpType.CREATE);
+ request.source(renderSource(source, start2), XContentType.JSON);
+ result = request.getConcreteWriteIndex(metadata3.getIndicesLookup().get(alias.getName()), metadata3);
+ assertThat(result, equalTo(metadata3.dataStreams().get(tsdbDataStream).getIndices().get(1)));
+ }
}
static String renderSource(String sourceTemplate, Instant instant) {
diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestBuilderTests.java
new file mode 100644
index 0000000000000..b2ab56c73c584
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestBuilderTests.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.update;
+
+import org.elasticsearch.test.ESTestCase;
+
+public class UpdateRequestBuilderTests extends ESTestCase {
+
+ public void testValidation() {
+ UpdateRequestBuilder updateRequestBuilder = new UpdateRequestBuilder(null);
+ updateRequestBuilder.setFetchSource(randomAlphaOfLength(10), randomAlphaOfLength(10));
+ updateRequestBuilder.setFetchSource(true);
+ expectThrows(IllegalStateException.class, updateRequestBuilder::request);
+ }
+}
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
index 32a1e1d14876f..00602a5f35d76 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
@@ -625,9 +625,9 @@ public void testMessages() {
decision.getExplanation(),
is(
"max supported index version ["
- + oldNode.node().getMaxIndexVersion()
+ + oldNode.node().getMaxIndexVersion().toReleaseVersion()
+ "] is older than the snapshot version ["
- + newNode.node().getMaxIndexVersion()
+ + newNode.node().getMaxIndexVersion().toReleaseVersion()
+ "]"
)
);
@@ -642,9 +642,9 @@ public void testMessages() {
decision.getExplanation(),
is(
"max supported index version ["
- + newNode.node().getMaxIndexVersion()
+ + newNode.node().getMaxIndexVersion().toReleaseVersion()
+ "] is the same or newer than snapshot version ["
- + oldNode.node().getMaxIndexVersion()
+ + oldNode.node().getMaxIndexVersion().toReleaseVersion()
+ "]"
)
);
diff --git a/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java b/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java
index b3b598f2bd38c..edcf395753281 100644
--- a/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java
@@ -128,7 +128,7 @@ public void testPreventJoinClusterWithUnsupportedTransportVersion() {
compatibilityVersions
)
);
- assertThat(e.getMessage(), containsString("may not join a cluster with minimum transport version"));
+ assertThat(e.getMessage(), containsString("may not join a cluster with minimum version"));
}
public void testPreventJoinClusterWithUnsupportedMappingsVersion() {
diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java
index 7ceebab711f35..f00697a3ae870 100644
--- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java
+++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java
@@ -1371,4 +1371,12 @@ public void testParsingMissingTimezone() {
long millisJoda = DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss").parseMillis("2018-02-18 17:47:17");
assertThat(millisJava, is(millisJoda));
}
+
+ // see https://bugs.openjdk.org/browse/JDK-8193877
+ public void testNoClassCastException() {
+ String input = "DpNKOGqhjZ";
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> DateFormatter.forPattern(input));
+ assertThat(e.getCause(), instanceOf(ClassCastException.class));
+ assertThat(e.getMessage(), containsString(input));
+ }
}
diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
index 9fd048cd4d2a7..ce0597e7169a4 100644
--- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
+++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
@@ -645,8 +645,8 @@ public void testSymlinkDataDirectory() throws Exception {
try {
Files.createSymbolicLink(symLinkPath, dataPath);
} catch (FileSystemException e) {
- if (IOUtils.WINDOWS && e.getMessage().equals("A required privilege is not held by the client")) {
- throw new AssumptionViolatedException("Symlinks on windows needs admin privileges", e);
+ if (IOUtils.WINDOWS && "A required privilege is not held by the client".equals(e.getReason())) {
+ throw new AssumptionViolatedException("Symlinks on Windows need admin privileges", e);
} else {
throw e;
}
diff --git a/server/src/test/java/org/elasticsearch/health/HealthServiceTests.java b/server/src/test/java/org/elasticsearch/health/HealthServiceTests.java
index 8ca531b678c4a..f7e2bb34740a7 100644
--- a/server/src/test/java/org/elasticsearch/health/HealthServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/health/HealthServiceTests.java
@@ -13,7 +13,6 @@
import org.elasticsearch.action.ActionType;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo;
-import org.elasticsearch.health.node.DiskHealthInfo;
import org.elasticsearch.health.node.FetchHealthInfoCacheAction;
import org.elasticsearch.health.node.HealthInfo;
import org.elasticsearch.test.ESTestCase;
@@ -22,17 +21,18 @@
import org.junit.After;
import org.junit.Before;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Stream;
+import static org.elasticsearch.core.Tuple.tuple;
import static org.elasticsearch.health.HealthStatus.GREEN;
import static org.elasticsearch.health.HealthStatus.RED;
import static org.elasticsearch.health.HealthStatus.UNKNOWN;
import static org.elasticsearch.health.HealthStatus.YELLOW;
+import static org.elasticsearch.health.node.HealthInfoTests.randomDiskHealthInfo;
+import static org.elasticsearch.health.node.HealthInfoTests.randomRepoHealthInfo;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.is;
@@ -248,12 +248,9 @@ public void testThatIndicatorsGetHealthInfoData() throws Exception {
var networkLatency = new HealthIndicatorResult("network_latency", GREEN, null, null, null, null);
var slowTasks = new HealthIndicatorResult("slow_task_assignment", YELLOW, null, null, null, null);
var shardsAvailable = new HealthIndicatorResult("shards_availability", GREEN, null, null, null, null);
- Map<String, DiskHealthInfo> diskHealthInfoMap = new HashMap<>();
- diskHealthInfoMap.put(
- randomAlphaOfLength(30),
- new DiskHealthInfo(randomFrom(HealthStatus.values()), randomFrom(DiskHealthInfo.Cause.values()))
- );
- HealthInfo healthInfo = new HealthInfo(diskHealthInfoMap, DataStreamLifecycleHealthInfo.NO_DSL_ERRORS);
+ var diskHealthInfoMap = randomMap(1, 1, () -> tuple(randomAlphaOfLength(10), randomDiskHealthInfo()));
+ var repoHealthInfoMap = randomMap(1, 1, () -> tuple(randomAlphaOfLength(10), randomRepoHealthInfo()));
+ HealthInfo healthInfo = new HealthInfo(diskHealthInfoMap, DataStreamLifecycleHealthInfo.NO_DSL_ERRORS, repoHealthInfoMap);
var service = new HealthService(
// The preflight indicator does not get data because the data is not fetched until after the preflight check
diff --git a/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java
index 1584c4a57dd32..a622c1ff600d6 100644
--- a/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java
@@ -258,7 +258,7 @@ public void testRedNoBlockedIndicesAndRedAllRoleNodes() throws IOException {
diskInfoByNode.put(discoveryNode.getId(), new DiskHealthInfo(HealthStatus.GREEN));
}
}
- HealthInfo healthInfo = new HealthInfo(diskInfoByNode, DataStreamLifecycleHealthInfo.NO_DSL_ERRORS);
+ HealthInfo healthInfo = new HealthInfo(diskInfoByNode, DataStreamLifecycleHealthInfo.NO_DSL_ERRORS, Map.of());
HealthIndicatorResult result = diskHealthIndicatorService.calculate(true, healthInfo);
assertThat(result.status(), equalTo(HealthStatus.RED));
@@ -1021,7 +1021,7 @@ private HealthInfo createHealthInfo(List<HealthInfoConfig> healthInfoConfigs) {
diskInfoByNode.put(node.getId(), diskHealthInfo);
}
}
- return new HealthInfo(diskInfoByNode, DataStreamLifecycleHealthInfo.NO_DSL_ERRORS);
+ return new HealthInfo(diskInfoByNode, DataStreamLifecycleHealthInfo.NO_DSL_ERRORS, Map.of());
}
private static ClusterService createClusterService(Collection<DiscoveryNode> nodes, boolean withBlockedIndex) {
diff --git a/server/src/test/java/org/elasticsearch/health/node/FetchHealthInfoCacheActionTests.java b/server/src/test/java/org/elasticsearch/health/node/FetchHealthInfoCacheActionTests.java
index f921c03686da4..f497a9f02dda6 100644
--- a/server/src/test/java/org/elasticsearch/health/node/FetchHealthInfoCacheActionTests.java
+++ b/server/src/test/java/org/elasticsearch/health/node/FetchHealthInfoCacheActionTests.java
@@ -29,13 +29,13 @@
import org.junit.BeforeClass;
import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import static org.elasticsearch.health.node.HealthInfoTests.mutateHealthInfo;
import static org.elasticsearch.health.node.HealthInfoTests.randomDslHealthInfo;
+import static org.elasticsearch.health.node.HealthInfoTests.randomRepoHealthInfo;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.Matchers.equalTo;
@@ -102,7 +102,7 @@ public void testAction() throws ExecutionException, InterruptedException {
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, localNode, allNodes));
HealthInfoCache healthInfoCache = getTestHealthInfoCache();
final FetchHealthInfoCacheAction.Response expectedResponse = new FetchHealthInfoCacheAction.Response(
- new HealthInfo(healthInfoCache.getHealthInfo().diskInfoByNode(), healthInfoCache.getHealthInfo().dslHealthInfo())
+ healthInfoCache.getHealthInfo()
);
ActionTestUtils.execute(
new FetchHealthInfoCacheAction.TransportAction(
@@ -128,35 +128,24 @@ private HealthInfoCache getTestHealthInfoCache() {
healthInfoCache.updateNodeHealth(
nodeId,
new DiskHealthInfo(randomFrom(HealthStatus.values()), randomFrom(DiskHealthInfo.Cause.values())),
- randomDslHealthInfo()
+ randomDslHealthInfo(),
+ randomRepoHealthInfo()
);
}
return healthInfoCache;
}
public void testResponseSerialization() {
- FetchHealthInfoCacheAction.Response response = new FetchHealthInfoCacheAction.Response(
- new HealthInfo(getTestHealthInfoCache().getHealthInfo().diskInfoByNode(), DataStreamLifecycleHealthInfo.NO_DSL_ERRORS)
- );
+ var healthInfo = getTestHealthInfoCache().getHealthInfo();
+ FetchHealthInfoCacheAction.Response response = new FetchHealthInfoCacheAction.Response(healthInfo);
EqualsHashCodeTestUtils.checkEqualsAndHashCode(
response,
- resopnseWritable -> copyWriteable(resopnseWritable, writableRegistry(), FetchHealthInfoCacheAction.Response::new),
+ responseWritable -> copyWriteable(responseWritable, writableRegistry(), FetchHealthInfoCacheAction.Response::new),
this::mutateResponse
);
}
private FetchHealthInfoCacheAction.Response mutateResponse(FetchHealthInfoCacheAction.Response originalResponse) {
- Map