"""
- new ProcessBuilder(
+ def process = [
'buildkite-agent',
'annotate',
'--context',
result.failure ? 'gradle-build-scans-failed' : 'gradle-build-scans',
'--append',
'--style',
- result.failure ? 'error' : 'info',
- body
- )
- .start()
- .waitFor()
+ result.failure ? 'error' : 'info'
+ ].execute()
+ process.withWriter { it.write(body) } // passing the body in as an argument has issues on Windows, so let's use stdin of the process instead
+ process.waitFor()
}
}
} else {
diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy
index 874141f2135ad..38b4cb499eeb9 100644
--- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy
+++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy
@@ -12,6 +12,7 @@ import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask
import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask
+import org.gradle.api.Action
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.file.Directory
@@ -61,16 +62,24 @@ class DocsTestPlugin implements Plugin {
group 'Docs'
description 'List each snippet'
defaultSubstitutions = commonDefaultSubstitutions
- perSnippet { println(it.toString()) }
+ perSnippet = new Action() {
+ @Override
+ void execute(SnippetsTask.Snippet snippet) {
+ println(snippet.toString())
+ }
+ }
}
project.tasks.register('listConsoleCandidates', SnippetsTask) {
group 'Docs'
description
'List snippets that probably should be marked // CONSOLE'
defaultSubstitutions = commonDefaultSubstitutions
- perSnippet {
- if (RestTestsFromSnippetsTask.isConsoleCandidate(it)) {
- println(it.toString())
+ perSnippet = new Action() {
+ @Override
+ void execute(SnippetsTask.Snippet snippet) {
+ if (RestTestsFromSnippetsTask.isConsoleCandidate(snippet)) {
+ println(snippet.toString())
+ }
}
}
}
@@ -80,7 +89,7 @@ class DocsTestPlugin implements Plugin {
defaultSubstitutions = commonDefaultSubstitutions
testRoot.convention(restRootDir)
doFirst {
- fileOperations.delete(restRootDir)
+ getFileOperations().delete(testRoot.get())
}
}
diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy
index eda86355ee306..81207181dc9a7 100644
--- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy
+++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy
@@ -10,8 +10,10 @@ package org.elasticsearch.gradle.internal.doc
import groovy.transform.PackageScope
import org.elasticsearch.gradle.internal.doc.SnippetsTask.Snippet
+import org.gradle.api.Action
import org.gradle.api.InvalidUserDataException
import org.gradle.api.file.DirectoryProperty
+import org.gradle.api.internal.file.FileOperations
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.Internal
import org.gradle.api.tasks.OutputDirectory
@@ -24,7 +26,7 @@ import java.nio.file.Path
/**
* Generates REST tests for each snippet marked // TEST.
*/
-class RestTestsFromSnippetsTask extends SnippetsTask {
+abstract class RestTestsFromSnippetsTask extends SnippetsTask {
/**
* These languages aren't supported by the syntax highlighter so we
* shouldn't use them.
@@ -64,13 +66,23 @@ class RestTestsFromSnippetsTask extends SnippetsTask {
@Internal
Set names = new HashSet<>()
+ @Inject
+ abstract FileOperations getFileOperations();
+
@Inject
RestTestsFromSnippetsTask(ObjectFactory objectFactory) {
testRoot = objectFactory.directoryProperty()
TestBuilder builder = new TestBuilder()
- perSnippet builder.&handleSnippet
- doLast builder.&checkUnconverted
- doLast builder.&finishLastTest
+ perSnippet = new Action() {
+ @Override
+ void execute(Snippet snippet) {
+ builder.handleSnippet(snippet)
+ }
+ }
+ doLast {
+ builder.checkUnconverted()
+ builder.finishLastTest()
+ }
}
/**
@@ -190,6 +202,7 @@ class RestTestsFromSnippetsTask extends SnippetsTask {
* Called each time a snippet is encountered. Tracks the snippets and
* calls buildTest to actually build the test.
*/
+
void handleSnippet(Snippet snippet) {
if (RestTestsFromSnippetsTask.isConsoleCandidate(snippet)) {
unconvertedCandidates.add(snippet.path.toString()
diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy
index 1580ec891ed2b..3e4ad91024082 100644
--- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy
+++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy
@@ -11,8 +11,9 @@ package org.elasticsearch.gradle.internal.doc
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonParseException;
-import com.fasterxml.jackson.core.JsonToken;
+import com.fasterxml.jackson.core.JsonToken
+import org.gradle.api.Action;
import org.gradle.api.DefaultTask
import org.gradle.api.InvalidUserDataException
import org.gradle.api.file.ConfigurableFileTree
@@ -44,7 +45,7 @@ class SnippetsTask extends DefaultTask {
* instance of Snippet.
*/
@Internal
- Closure perSnippet
+ Action perSnippet
/**
* The docs to scan. Defaults to every file in the directory exception the
@@ -134,7 +135,7 @@ class SnippetsTask extends DefaultTask {
+ "After substitutions and munging, the json looks like:\n" + quoted, e);
}
}
- perSnippet(snippet)
+ perSnippet.execute(snippet)
snippet = null
}
file.eachLine('UTF-8') { String line, int lineNumber ->
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java
index f709600fc7979..70d130605c15e 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java
@@ -13,6 +13,7 @@
import org.elasticsearch.gradle.internal.conventions.util.Util;
import org.elasticsearch.gradle.internal.info.BuildParams;
import org.elasticsearch.gradle.internal.precommit.JarHellPrecommitPlugin;
+import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin;
import org.elasticsearch.gradle.plugin.PluginBuildPlugin;
import org.elasticsearch.gradle.plugin.PluginPropertiesExtension;
import org.elasticsearch.gradle.testclusters.ElasticsearchCluster;
@@ -36,6 +37,7 @@ public void apply(Project project) {
project.getPluginManager().apply(PluginBuildPlugin.class);
project.getPluginManager().apply(JarHellPrecommitPlugin.class);
project.getPluginManager().apply(ElasticsearchJavaPlugin.class);
+ project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class);
// Clear default dependencies added by public PluginBuildPlugin as we add our
// own project dependencies for internal builds
// TODO remove once we removed default dependencies from PluginBuildPlugin
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java
index 6849796579ad9..6c7bc6753531c 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java
@@ -11,6 +11,7 @@
import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin;
import org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks;
import org.elasticsearch.gradle.internal.snyk.SnykDependencyMonitoringGradlePlugin;
+import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin;
import org.gradle.api.InvalidUserDataException;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
@@ -61,6 +62,7 @@ public void apply(final Project project) {
project.getPluginManager().apply(ElasticsearchJavadocPlugin.class);
project.getPluginManager().apply(DependenciesInfoPlugin.class);
project.getPluginManager().apply(SnykDependencyMonitoringGradlePlugin.class);
+ project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class);
InternalPrecommitTasks.create(project, true);
configureLicenseAndNotice(project);
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java
index d51770ffd30ed..71c76b2045007 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java
@@ -72,20 +72,19 @@ public void apply(Project project) {
createClone.commandLine("git", "clone", buildLayout.getRootDirectory(), gitExtension.getCheckoutDir().get());
});
- ExtraPropertiesExtension extraProperties = project.getExtensions().getExtraProperties();
TaskProvider findRemoteTaskProvider = tasks.register("findRemote", LoggedExec.class, findRemote -> {
findRemote.dependsOn(createCloneTaskProvider);
findRemote.getWorkingDir().set(gitExtension.getCheckoutDir());
findRemote.commandLine("git", "remote", "-v");
findRemote.getCaptureOutput().set(true);
- findRemote.doLast(t -> { extraProperties.set("remoteExists", isRemoteAvailable(remote, findRemote.getOutput())); });
+ findRemote.doLast(t -> System.setProperty("remoteExists", String.valueOf(isRemoteAvailable(remote, findRemote.getOutput()))));
});
TaskProvider addRemoteTaskProvider = tasks.register("addRemote", addRemote -> {
String rootProjectName = project.getRootProject().getName();
addRemote.dependsOn(findRemoteTaskProvider);
- addRemote.onlyIf("remote exists", task -> ((boolean) extraProperties.get("remoteExists")) == false);
+ addRemote.onlyIf("remote exists", task -> (Boolean.valueOf(providerFactory.systemProperty("remoteExists").get()) == false));
addRemote.doLast(new Action() {
@Override
public void execute(Task task) {
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java
index 2468711561ae4..f727dc165a8a9 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java
@@ -286,11 +286,12 @@ static void createBuildBwcTask(
if (project.getGradle().getStartParameter().isBuildCacheEnabled()) {
c.getArgs().add("--build-cache");
}
+ File rootDir = project.getRootDir();
c.doLast(new Action() {
@Override
public void execute(Task task) {
if (expectedOutputFile.exists() == false) {
- Path relativeOutputPath = project.getRootDir().toPath().relativize(expectedOutputFile.toPath());
+ Path relativeOutputPath = rootDir.toPath().relativize(expectedOutputFile.toPath());
final String message = "Building %s didn't generate expected artifact [%s]. The working branch may be "
+ "out-of-date - try merging in the latest upstream changes to the branch.";
throw new InvalidUserDataException(message.formatted(bwcVersion.get(), relativeOutputPath));
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java
index 16c7bf6d32862..f92789f701049 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java
@@ -23,16 +23,17 @@
import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin;
import org.elasticsearch.gradle.util.GradleUtils;
import org.gradle.api.GradleException;
-import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Dependency;
+import org.gradle.api.artifacts.dsl.DependencyHandler;
import org.gradle.api.provider.Provider;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
import java.util.function.Function;
-import static org.elasticsearch.gradle.util.GradleUtils.projectDependency;
-
/**
* An internal elasticsearch build plugin that registers additional
* distribution resolution strategies to the 'elasticsearch.download-distribution' plugin
@@ -64,18 +65,18 @@ public void apply(Project project) {
*
* BWC versions are resolved as project to projects under `:distribution:bwc`.
*/
- private void registerInternalDistributionResolutions(NamedDomainObjectContainer resolutions) {
- resolutions.register("localBuild", distributionResolution -> distributionResolution.setResolver((project, distribution) -> {
+ private void registerInternalDistributionResolutions(List resolutions) {
+ resolutions.add(new DistributionResolution("local-build", (project, distribution) -> {
if (isCurrentVersion(distribution)) {
// non-external project, so depend on local build
return new ProjectBasedDistributionDependency(
- config -> projectDependency(project, distributionProjectPath(distribution), config)
+ config -> projectDependency(project.getDependencies(), distributionProjectPath(distribution), config)
);
}
return null;
}));
- resolutions.register("bwc", distributionResolution -> distributionResolution.setResolver((project, distribution) -> {
+ resolutions.add(new DistributionResolution("bwc", (project, distribution) -> {
BwcVersions.UnreleasedVersionInfo unreleasedInfo = BuildParams.getBwcVersions()
.unreleasedInfo(Version.fromString(distribution.getVersion()));
if (unreleasedInfo != null) {
@@ -89,7 +90,7 @@ private void registerInternalDistributionResolutions(NamedDomainObjectContainer<
}
String projectConfig = getProjectConfig(distribution, unreleasedInfo);
return new ProjectBasedDistributionDependency(
- (config) -> projectDependency(project, unreleasedInfo.gradleProjectPath(), projectConfig)
+ (config) -> projectDependency(project.getDependencies(), unreleasedInfo.gradleProjectPath(), projectConfig)
);
}
return null;
@@ -116,6 +117,13 @@ private static String getProjectConfig(ElasticsearchDistribution distribution, B
}
}
+ private static Dependency projectDependency(DependencyHandler dependencyHandler, String projectPath, String projectConfig) {
+ Map depConfig = new HashMap<>();
+ depConfig.put("path", projectPath);
+ depConfig.put("configuration", projectConfig);
+ return dependencyHandler.project(depConfig);
+ }
+
private static String distributionProjectPath(ElasticsearchDistribution distribution) {
String projectPath = ":distribution";
if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) {
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java
index b32c566363e88..93753f7c7ac56 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java
@@ -143,6 +143,7 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:security");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:multi-node");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:single-node");
+ map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:mixed-cluster");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:fleet:qa:rest");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:graph:qa:with-security");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:identity-provider:qa:idp-rest-tests");
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
index 194d0361980ec..bb0b8dcf04437 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
@@ -377,6 +377,7 @@ public void checkForbidden() {
parameters.getTargetCompatibility().set(getTargetCompatibility());
parameters.getIgnoreFailures().set(getIgnoreFailures());
parameters.getSuccessMarker().set(getSuccessMarker());
+ parameters.getSignaturesFiles().from(getSignaturesFiles());
});
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java
index bcbe1740630ce..42d3a770dbbcc 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java
@@ -101,7 +101,7 @@ public void apply(Project project) {
addDistributionSysprop(t, DISTRIBUTION_SYSPROP, distribution::getFilepath);
addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePlugin.getSingleFile().toString());
t.exclude("**/PackageUpgradeTests.class");
- }, distribution.getArchiveDependencies(), examplePlugin.getDependencies());
+ }, distribution, examplePlugin.getDependencies());
if (distribution.getPlatform() == Platform.WINDOWS) {
windowsTestTasks.add(destructiveTask);
@@ -235,6 +235,7 @@ private static ElasticsearchDistribution createDistro(
d.setBundledJdk(bundledJdk);
}
d.setVersion(version);
+ d.setPreferArchive(true);
});
// Allow us to gracefully omit building Docker distributions if Docker is not available on the system.
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java
new file mode 100644
index 0000000000000..bd9df6d3903ca
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.test;
+
+import org.elasticsearch.gradle.dependencies.CompileOnlyResolvePlugin;
+import org.gradle.api.Plugin;
+import org.gradle.api.Project;
+import org.gradle.api.artifacts.Configuration;
+import org.gradle.api.artifacts.type.ArtifactTypeDefinition;
+import org.gradle.api.tasks.SourceSet;
+import org.gradle.api.tasks.SourceSetContainer;
+import org.gradle.api.tasks.TaskProvider;
+
+import java.util.Map;
+
+/**
+ * Extracts historical feature metadata into a machine-readable format for use in backward compatibility testing.
+ */
+public class HistoricalFeaturesMetadataPlugin implements Plugin {
+ public static final String HISTORICAL_FEATURES_JSON = "historical-features.json";
+ public static final String FEATURES_METADATA_TYPE = "features-metadata-json";
+ public static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadata";
+
+ @Override
+ public void apply(Project project) {
+ Configuration featureMetadataExtractorConfig = project.getConfigurations().create("featuresMetadataExtractor", c -> {
+ // Don't bother adding this dependency if the project doesn't exist which simplifies testing
+ if (project.findProject(":test:metadata-extractor") != null) {
+ c.defaultDependencies(d -> d.add(project.getDependencies().project(Map.of("path", ":test:metadata-extractor"))));
+ }
+ });
+
+ SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
+ SourceSet mainSourceSet = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME);
+
+ TaskProvider generateTask = project.getTasks()
+ .register("generateHistoricalFeaturesMetadata", HistoricalFeaturesMetadataTask.class, task -> {
+ task.setClasspath(
+ featureMetadataExtractorConfig.plus(mainSourceSet.getRuntimeClasspath())
+ .plus(project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME))
+ );
+ task.getOutputFile().convention(project.getLayout().getBuildDirectory().file(HISTORICAL_FEATURES_JSON));
+ });
+
+ Configuration featuresMetadataArtifactConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> {
+ c.setCanBeResolved(false);
+ c.setCanBeConsumed(true);
+ c.attributes(a -> { a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, FEATURES_METADATA_TYPE); });
+ });
+
+ project.getArtifacts().add(featuresMetadataArtifactConfig.getName(), generateTask);
+ }
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java
new file mode 100644
index 0000000000000..0891225d1e1ef
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.test;
+
+import org.elasticsearch.gradle.LoggedExec;
+import org.gradle.api.DefaultTask;
+import org.gradle.api.file.ConfigurableFileCollection;
+import org.gradle.api.file.FileCollection;
+import org.gradle.api.file.RegularFileProperty;
+import org.gradle.api.tasks.CacheableTask;
+import org.gradle.api.tasks.Classpath;
+import org.gradle.api.tasks.OutputFile;
+import org.gradle.api.tasks.TaskAction;
+import org.gradle.process.ExecOperations;
+import org.gradle.workers.WorkAction;
+import org.gradle.workers.WorkParameters;
+import org.gradle.workers.WorkerExecutor;
+
+import javax.inject.Inject;
+
+@CacheableTask
+public abstract class HistoricalFeaturesMetadataTask extends DefaultTask {
+ private FileCollection classpath;
+
+ @OutputFile
+ public abstract RegularFileProperty getOutputFile();
+
+ @Classpath
+ public FileCollection getClasspath() {
+ return classpath;
+ }
+
+ public void setClasspath(FileCollection classpath) {
+ this.classpath = classpath;
+ }
+
+ @Inject
+ public abstract WorkerExecutor getWorkerExecutor();
+
+ @TaskAction
+ public void execute() {
+ getWorkerExecutor().noIsolation().submit(HistoricalFeaturesMetadataWorkAction.class, params -> {
+ params.getClasspath().setFrom(getClasspath());
+ params.getOutputFile().set(getOutputFile());
+ });
+ }
+
+ public interface HistoricalFeaturesWorkParameters extends WorkParameters {
+ ConfigurableFileCollection getClasspath();
+
+ RegularFileProperty getOutputFile();
+ }
+
+ public abstract static class HistoricalFeaturesMetadataWorkAction implements WorkAction {
+ private final ExecOperations execOperations;
+
+ @Inject
+ public HistoricalFeaturesMetadataWorkAction(ExecOperations execOperations) {
+ this.execOperations = execOperations;
+ }
+
+ @Override
+ public void execute() {
+ LoggedExec.javaexec(execOperations, spec -> {
+ spec.getMainClass().set("org.elasticsearch.extractor.features.HistoricalFeaturesMetadataExtractor");
+ spec.classpath(getParameters().getClasspath());
+ spec.args(getParameters().getOutputFile().get().getAsFile().getAbsolutePath());
+ });
+ }
+ }
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java
index eacc5da6220ab..be6e3eb377aa1 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java
@@ -22,12 +22,18 @@
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
+import org.gradle.api.Task;
import org.gradle.api.plugins.JavaBasePlugin;
import org.gradle.api.provider.Provider;
import org.gradle.api.provider.ProviderFactory;
+import org.gradle.api.specs.NotSpec;
+import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.Sync;
+import org.gradle.api.tasks.TaskContainer;
import org.gradle.api.tasks.bundling.Zip;
+import java.util.Collections;
+
import javax.inject.Inject;
import static org.elasticsearch.gradle.internal.RestrictedBuildApiService.BUILD_API_RESTRICTIONS_SYS_PROPERTY;
@@ -47,6 +53,7 @@ public class LegacyRestTestBasePlugin implements Plugin {
private static final String TESTS_CLUSTER_REMOTE_ACCESS = "tests.cluster.remote_access";
private ProviderFactory providerFactory;
+ private Project project;
@Inject
public LegacyRestTestBasePlugin(ProviderFactory providerFactory) {
@@ -55,6 +62,7 @@ public LegacyRestTestBasePlugin(ProviderFactory providerFactory) {
@Override
public void apply(Project project) {
+ this.project = project;
Provider serviceProvider = project.getGradle()
.getSharedServices()
.registerIfAbsent("restrictedBuildAPI", RestrictedBuildApiService.class, spec -> {
@@ -118,9 +126,30 @@ public void apply(Project project) {
t.getClusters().forEach(c -> c.plugin(bundle));
}
});
+ configureCacheability(t);
});
}
+ private void configureCacheability(StandaloneRestIntegTestTask testTask) {
+ TaskContainer tasks = project.getTasks();
+ Spec taskSpec = t -> tasks.withType(StandaloneRestIntegTestTask.class)
+ .stream()
+ .filter(task -> task != testTask)
+ .anyMatch(task -> Collections.disjoint(task.getClusters(), testTask.getClusters()) == false);
+ testTask.getOutputs()
+ .doNotCacheIf(
+ "Caching disabled for this task since it uses a cluster shared by other tasks",
+ /*
+ * Look for any other tasks which use the same cluster as this task. Since tests often have side effects for the cluster
+ * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To
+ * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between
+ * multiple tasks.
+ */
+ taskSpec
+ );
+ testTask.getOutputs().upToDateWhen(new NotSpec(taskSpec));
+ }
+
private String systemProperty(String propName) {
return providerFactory.systemProperty(propName).getOrNull();
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java
index 9359272b29610..94345ed80eec7 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java
@@ -15,6 +15,7 @@
import org.gradle.api.file.FileSystemOperations;
import org.gradle.api.file.FileTree;
import org.gradle.api.file.ProjectLayout;
+import org.gradle.api.internal.file.FileOperations;
import org.gradle.api.model.ObjectFactory;
import org.gradle.api.provider.ListProperty;
import org.gradle.api.tasks.IgnoreEmptyDirectories;
@@ -43,7 +44,7 @@
*
* @see RestResourcesPlugin
*/
-public class CopyRestTestsTask extends DefaultTask {
+public abstract class CopyRestTestsTask extends DefaultTask {
private static final String REST_TEST_PREFIX = "rest-api-spec/test";
private final ListProperty includeCore;
private final ListProperty includeXpack;
@@ -62,6 +63,9 @@ public class CopyRestTestsTask extends DefaultTask {
private final ProjectLayout projectLayout;
private final FileSystemOperations fileSystemOperations;
+ @Inject
+ public abstract FileOperations getFileOperations();
+
@Inject
public CopyRestTestsTask(
ProjectLayout projectLayout,
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
index c602a50c2adb8..566e93d8a3f53 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
@@ -21,6 +21,7 @@
import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin;
import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin;
import org.elasticsearch.gradle.internal.info.BuildParams;
+import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin;
import org.elasticsearch.gradle.plugin.BasePluginBuildPlugin;
import org.elasticsearch.gradle.plugin.PluginBuildPlugin;
import org.elasticsearch.gradle.plugin.PluginPropertiesExtension;
@@ -35,9 +36,12 @@
import org.gradle.api.Task;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.Dependency;
+import org.gradle.api.artifacts.DependencySet;
import org.gradle.api.artifacts.ProjectDependency;
import org.gradle.api.artifacts.type.ArtifactTypeDefinition;
import org.gradle.api.attributes.Attribute;
+import org.gradle.api.file.ConfigurableFileCollection;
+import org.gradle.api.file.FileCollection;
import org.gradle.api.file.FileTree;
import org.gradle.api.provider.ProviderFactory;
import org.gradle.api.tasks.ClasspathNormalizer;
@@ -72,6 +76,9 @@ public class RestTestBasePlugin implements Plugin {
private static final String PLUGINS_CONFIGURATION = "clusterPlugins";
private static final String EXTRACTED_PLUGINS_CONFIGURATION = "extractedPlugins";
private static final Attribute CONFIGURATION_ATTRIBUTE = Attribute.of("test-cluster-artifacts", String.class);
+ private static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadataDeps";
+ private static final String DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION = "defaultDistrofeaturesMetadataDeps";
+ private static final String TESTS_FEATURES_METADATA_PATH = "tests.features.metadata.path";
private final ProviderFactory providerFactory;
@@ -105,6 +112,36 @@ public void apply(Project project) {
extractedPluginsConfiguration.extendsFrom(pluginsConfiguration);
configureArtifactTransforms(project);
+ // Create configuration for aggregating historical feature metadata
+ Configuration featureMetadataConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> {
+ c.setCanBeConsumed(false);
+ c.setCanBeResolved(true);
+ c.attributes(
+ a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE)
+ );
+ c.defaultDependencies(d -> d.add(project.getDependencies().project(Map.of("path", ":server"))));
+ c.withDependencies(dependencies -> {
+ // We can't just use Configuration#extendsFrom() here as we'd inherit the wrong project configuration
+ copyDependencies(project, dependencies, modulesConfiguration);
+ copyDependencies(project, dependencies, pluginsConfiguration);
+ });
+ });
+
+ Configuration defaultDistroFeatureMetadataConfig = project.getConfigurations()
+ .create(DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION, c -> {
+ c.setCanBeConsumed(false);
+ c.setCanBeResolved(true);
+ c.attributes(
+ a -> a.attribute(
+ ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE,
+ HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE
+ )
+ );
+ c.defaultDependencies(
+ d -> d.add(project.getDependencies().project(Map.of("path", ":distribution", "configuration", "featuresMetadata")))
+ );
+ });
+
// For plugin and module projects, register the current project plugin bundle as a dependency
project.getPluginManager().withPlugin("elasticsearch.esplugin", plugin -> {
if (GradleUtils.isModuleProject(project.getPath())) {
@@ -122,6 +159,10 @@ public void apply(Project project) {
task.dependsOn(integTestDistro, modulesConfiguration);
registerDistributionInputs(task, integTestDistro);
+ // Pass feature metadata on to tests
+ task.getInputs().files(featureMetadataConfig).withPathSensitivity(PathSensitivity.NONE);
+ nonInputSystemProperties.systemProperty(TESTS_FEATURES_METADATA_PATH, () -> featureMetadataConfig.getAsPath());
+
// Enable parallel execution for these tests since each test gets its own cluster
task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2);
nonInputSystemProperties.systemProperty(TESTS_MAX_PARALLEL_FORKS_SYSPROP, () -> String.valueOf(task.getMaxParallelForks()));
@@ -134,16 +175,20 @@ public void apply(Project project) {
task.systemProperty("tests.system_call_filter", "false");
// Register plugins and modules as task inputs and pass paths as system properties to tests
- nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulesConfiguration::getAsPath);
- registerConfigurationInputs(task, modulesConfiguration);
- nonInputSystemProperties.systemProperty(TESTS_CLUSTER_PLUGINS_PATH_SYSPROP, pluginsConfiguration::getAsPath);
- registerConfigurationInputs(task, extractedPluginsConfiguration);
+ var modulePath = project.getObjects().fileCollection().from(modulesConfiguration);
+ nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulePath::getAsPath);
+ registerConfigurationInputs(task, modulesConfiguration.getName(), modulePath);
+ var pluginPath = project.getObjects().fileCollection().from(pluginsConfiguration);
+ nonInputSystemProperties.systemProperty(TESTS_CLUSTER_PLUGINS_PATH_SYSPROP, pluginPath::getAsPath);
+ registerConfigurationInputs(
+ task,
+ extractedPluginsConfiguration.getName(),
+ project.getObjects().fileCollection().from(extractedPluginsConfiguration)
+ );
// Wire up integ-test distribution by default for all test tasks
- nonInputSystemProperties.systemProperty(
- INTEG_TEST_DISTRIBUTION_SYSPROP,
- () -> integTestDistro.getExtracted().getSingleFile().getPath()
- );
+ FileCollection extracted = integTestDistro.getExtracted();
+ nonInputSystemProperties.systemProperty(INTEG_TEST_DISTRIBUTION_SYSPROP, () -> extracted.getSingleFile().getPath());
nonInputSystemProperties.systemProperty(TESTS_RUNTIME_JAVA_SYSPROP, BuildParams.getRuntimeJavaHome());
// Add `usesDefaultDistribution()` extension method to test tasks to indicate they require the default distro
@@ -157,6 +202,11 @@ public Void call(Object... args) {
DEFAULT_DISTRIBUTION_SYSPROP,
providerFactory.provider(() -> defaultDistro.getExtracted().getSingleFile().getPath())
);
+
+ // If we are using the default distribution we need to register all module feature metadata
+ task.getInputs().files(defaultDistroFeatureMetadataConfig).withPathSensitivity(PathSensitivity.NONE);
+ nonInputSystemProperties.systemProperty(TESTS_FEATURES_METADATA_PATH, defaultDistroFeatureMetadataConfig::getAsPath);
+
return null;
}
});
@@ -192,6 +242,14 @@ public Void call(Object... args) {
});
}
+ private void copyDependencies(Project project, DependencySet dependencies, Configuration configuration) {
+ configuration.getDependencies()
+ .stream()
+ .filter(d -> d instanceof ProjectDependency)
+ .map(d -> project.getDependencies().project(Map.of("path", ((ProjectDependency) d).getDependencyProject().getPath())))
+ .forEach(dependencies::add);
+ }
+
private ElasticsearchDistribution createDistribution(Project project, String name, String version) {
return createDistribution(project, name, version, null);
}
@@ -216,15 +274,15 @@ private FileTree getDistributionFiles(ElasticsearchDistribution distribution, Ac
return distribution.getExtracted().getAsFileTree().matching(patternFilter);
}
- private void registerConfigurationInputs(Task task, Configuration configuration) {
+ private void registerConfigurationInputs(Task task, String configurationName, ConfigurableFileCollection configuration) {
task.getInputs()
.files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar") == false)))
- .withPropertyName(configuration.getName() + "-files")
+ .withPropertyName(configurationName + "-files")
.withPathSensitivity(PathSensitivity.RELATIVE);
task.getInputs()
.files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar"))))
- .withPropertyName(configuration.getName() + "-classpath")
+ .withPropertyName(configurationName + "-classpath")
.withNormalizer(ClasspathNormalizer.class);
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java
index 76004e3e5f6db..9b1e8a67deec8 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java
@@ -457,15 +457,17 @@ public void transform() throws IOException {
Collections.singletonList(new Skip(skippedFilesWithReason.get(file)))
);
} else {
+ List<RestTestTransform<?>> transformations = new ArrayList<>(getTransformations().get());
+
if (skippedFilesWithTestAndReason.containsKey(file)) {
// skip the named tests for this file
skippedFilesWithTestAndReason.get(file).forEach(fullTestNameAndReasonPair -> {
String prefix = file.getName().replace(".yml", "/");
String singleTestName = fullTestNameAndReasonPair.getLeft().replaceAll(".*" + prefix, "");
- getTransformations().add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight()));
+ transformations.add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight()));
});
}
- transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), getTransformations().get());
+ transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), transformations);
}
// convert to url to ensure forward slashes
diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt
index 34f39bbc4ca54..48c888acd35e2 100644
--- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt
+++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt
@@ -158,6 +158,8 @@ org.elasticsearch.cluster.ClusterState#compatibilityVersions()
@defaultMessage ClusterFeatures#nodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster.
org.elasticsearch.cluster.ClusterFeatures#nodeFeatures()
+@defaultMessage ClusterFeatures#allNodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster.
+org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures()
@defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster.
org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature)
diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy
index 6b662b8165034..719fae2b463c0 100644
--- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy
+++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy
@@ -34,7 +34,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest {
id 'elasticsearch.testclusters'
}
- class SomeClusterAwareTask extends DefaultTask implements TestClustersAware {
+ abstract class SomeClusterAwareTask extends DefaultTask implements TestClustersAware {
private Collection clusters = new HashSet<>();
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java
index d08dc469e5ba5..e12523870b15b 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java
@@ -11,6 +11,7 @@
import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes;
import org.elasticsearch.gradle.transform.SymbolicLinkPreservingUntarTransform;
import org.elasticsearch.gradle.transform.UnzipTransform;
+import org.gradle.api.Action;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
@@ -22,7 +23,8 @@
import org.gradle.api.provider.Property;
import org.gradle.api.provider.Provider;
-import java.util.Comparator;
+import java.util.ArrayList;
+import java.util.List;
import javax.inject.Inject;
@@ -42,9 +44,10 @@ public class DistributionDownloadPlugin implements Plugin {
private static final String DOWNLOAD_REPO_NAME = "elasticsearch-downloads";
private static final String SNAPSHOT_REPO_NAME = "elasticsearch-snapshots";
public static final String DISTRO_EXTRACTED_CONFIG_PREFIX = "es_distro_extracted_";
+ public static final String DISTRO_CONFIG_PREFIX = "es_distro_file_";
private NamedDomainObjectContainer<ElasticsearchDistribution> distributionsContainer;
- private NamedDomainObjectContainer<DistributionResolution> distributionsResolutionStrategiesContainer;
+ private List<DistributionResolution> distributionsResolutionStrategies;
private Property dockerAvailability;
@@ -77,7 +80,7 @@ public void apply(Project project) {
private void setupDistributionContainer(Project project, Property dockerAvailable) {
distributionsContainer = project.container(ElasticsearchDistribution.class, name -> {
- Configuration fileConfiguration = project.getConfigurations().create("es_distro_file_" + name);
+ Configuration fileConfiguration = project.getConfigurations().create(DISTRO_CONFIG_PREFIX + name);
Configuration extractedConfiguration = project.getConfigurations().create(DISTRO_EXTRACTED_CONFIG_PREFIX + name);
extractedConfiguration.getAttributes()
.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE);
@@ -85,21 +88,17 @@ private void setupDistributionContainer(Project project, Property docke
name,
project.getObjects(),
dockerAvailability,
- fileConfiguration,
- extractedConfiguration,
- (dist) -> finalizeDistributionDependencies(project, dist)
+ project.getObjects().fileCollection().from(fileConfiguration),
+ project.getObjects().fileCollection().from(extractedConfiguration),
+ new FinalizeDistributionAction(distributionsResolutionStrategies, project)
);
});
project.getExtensions().add(CONTAINER_NAME, distributionsContainer);
}
private void setupResolutionsContainer(Project project) {
- distributionsResolutionStrategiesContainer = project.container(DistributionResolution.class);
- // We want this ordered in the same resolution strategies are added
- distributionsResolutionStrategiesContainer.whenObjectAdded(
- resolveDependencyNotation -> resolveDependencyNotation.setPriority(distributionsResolutionStrategiesContainer.size())
- );
- project.getExtensions().add(RESOLUTION_CONTAINER_NAME, distributionsResolutionStrategiesContainer);
+ distributionsResolutionStrategies = new ArrayList<>();
+ project.getExtensions().add(RESOLUTION_CONTAINER_NAME, distributionsResolutionStrategies);
}
@SuppressWarnings("unchecked")
@@ -108,30 +107,8 @@ public static NamedDomainObjectContainer getContainer
}
@SuppressWarnings("unchecked")
- public static NamedDomainObjectContainer getRegistrationsContainer(Project project) {
- return (NamedDomainObjectContainer) project.getExtensions().getByName(RESOLUTION_CONTAINER_NAME);
- }
-
- private void finalizeDistributionDependencies(Project project, ElasticsearchDistribution distribution) {
- DependencyHandler dependencies = project.getDependencies();
- // for the distribution as a file, just depend on the artifact directly
- DistributionDependency distributionDependency = resolveDependencyNotation(project, distribution);
- dependencies.add(distribution.configuration.getName(), distributionDependency.getDefaultNotation());
- // no extraction needed for rpm, deb or docker
- if (distribution.getType().shouldExtract()) {
- // The extracted configuration depends on the artifact directly but has
- // an artifact transform registered to resolve it as an unpacked folder.
- dependencies.add(distribution.getExtracted().getName(), distributionDependency.getExtractedNotation());
- }
- }
-
- private DistributionDependency resolveDependencyNotation(Project p, ElasticsearchDistribution distribution) {
- return distributionsResolutionStrategiesContainer.stream()
- .sorted(Comparator.comparingInt(DistributionResolution::getPriority))
- .map(r -> r.getResolver().resolve(p, distribution))
- .filter(d -> d != null)
- .findFirst()
- .orElseGet(() -> DistributionDependency.of(dependencyNotation(distribution)));
+ public static List<DistributionResolution> getRegistrationsContainer(Project project) {
+ return (List<DistributionResolution>) project.getExtensions().getByName(RESOLUTION_CONTAINER_NAME);
}
private static void addIvyRepo(Project project, String name, String url, String group) {
@@ -155,22 +132,53 @@ private static void setupDownloadServiceRepo(Project project) {
addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://snapshots-no-kpi.elastic.co", FAKE_SNAPSHOT_IVY_GROUP);
}
- /**
- * Returns a dependency object representing the given distribution.
- *
- * The returned object is suitable to be passed to {@link DependencyHandler}.
- * The concrete type of the object will be a set of maven coordinates as a {@link String}.
- * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial
- * coordinates that resolve to the Elastic download service through an ivy repository.
- */
- private String dependencyNotation(ElasticsearchDistribution distribution) {
- if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) {
- return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip";
+ private record FinalizeDistributionAction(List<DistributionResolution> resolutionList, Project project)
+ implements
+ Action<ElasticsearchDistribution> {
+ @Override
+
+ public void execute(ElasticsearchDistribution distro) {
+ finalizeDistributionDependencies(project, distro);
+ }
+
+ private void finalizeDistributionDependencies(Project project, ElasticsearchDistribution distribution) {
+ // for the distribution as a file, just depend on the artifact directly
+ DistributionDependency distributionDependency = resolveDependencyNotation(project, distribution);
+ project.getDependencies().add(DISTRO_CONFIG_PREFIX + distribution.getName(), distributionDependency.getDefaultNotation());
+ // no extraction needed for rpm, deb or docker
+ if (distribution.getType().shouldExtract()) {
+ // The extracted configuration depends on the artifact directly but has
+ // an artifact transform registered to resolve it as an unpacked folder.
+ project.getDependencies()
+ .add(DISTRO_EXTRACTED_CONFIG_PREFIX + distribution.getName(), distributionDependency.getExtractedNotation());
+ }
+ }
+
+ private DistributionDependency resolveDependencyNotation(Project project, ElasticsearchDistribution distro) {
+ return resolutionList.stream()
+ .map(r -> r.getResolver().resolve(project, distro))
+ .filter(d -> d != null)
+ .findFirst()
+ .orElseGet(() -> DistributionDependency.of(dependencyNotation(distro)));
+ }
+
+ /**
+ * Returns a dependency object representing the given distribution.
+ *
+ * The returned object is suitable to be passed to {@link DependencyHandler}.
+ * The concrete type of the object will be a set of maven coordinates as a {@link String}.
+ * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial
+ * coordinates that resolve to the Elastic download service through an ivy repository.
+ */
+ private String dependencyNotation(ElasticsearchDistribution distribution) {
+ if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) {
+ return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip";
+ }
+ Version distroVersion = Version.fromString(distribution.getVersion());
+ String extension = distribution.getType().getExtension(distribution.getPlatform());
+ String classifier = distribution.getType().getClassifier(distribution.getPlatform(), distroVersion);
+ String group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP;
+ return group + ":elasticsearch" + ":" + distribution.getVersion() + classifier + "@" + extension;
}
- Version distroVersion = Version.fromString(distribution.getVersion());
- String extension = distribution.getType().getExtension(distribution.getPlatform());
- String classifier = distribution.getType().getClassifier(distribution.getPlatform(), distroVersion);
- String group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP;
- return group + ":elasticsearch" + ":" + distribution.getVersion() + classifier + "@" + extension;
}
}
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java
index 3b82c9f6975a0..0d8177dea5cb6 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java
@@ -12,9 +12,14 @@
public class DistributionResolution {
private Resolver resolver;
- private String name;
+ private final String name;
private int priority;
+ public DistributionResolution(String name, Resolver resolver) {
+ this(name);
+ this.resolver = resolver;
+ }
+
public DistributionResolution(String name) {
this.name = name;
}
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java
index 5350b6698cb30..eca0fb319cea4 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java
@@ -11,7 +11,8 @@
import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes;
import org.gradle.api.Action;
import org.gradle.api.Buildable;
-import org.gradle.api.artifacts.Configuration;
+import org.gradle.api.file.ConfigurableFileCollection;
+import org.gradle.api.file.FileCollection;
import org.gradle.api.model.ObjectFactory;
import org.gradle.api.provider.Property;
import org.gradle.api.tasks.TaskDependency;
@@ -44,7 +45,7 @@ public String toString() {
private final String name;
private final Property<Boolean> dockerAvailability;
// pkg private so plugin can configure
- final Configuration configuration;
+ final FileCollection configuration;
private final Property architecture;
private final Property version;
@@ -52,7 +53,8 @@ public String toString() {
private final Property platform;
private final Property bundledJdk;
private final Property failIfUnavailable;
- private final Configuration extracted;
+ private final Property preferArchive;
+ private final ConfigurableFileCollection extracted;
private Action distributionFinalizer;
private boolean frozen = false;
@@ -60,8 +62,8 @@ public String toString() {
String name,
ObjectFactory objectFactory,
Property dockerAvailability,
- Configuration fileConfiguration,
- Configuration extractedConfiguration,
+ ConfigurableFileCollection fileConfiguration,
+ ConfigurableFileCollection extractedConfiguration,
Action distributionFinalizer
) {
this.name = name;
@@ -74,6 +76,7 @@ public String toString() {
this.platform = objectFactory.property(Platform.class);
this.bundledJdk = objectFactory.property(Boolean.class);
this.failIfUnavailable = objectFactory.property(Boolean.class).convention(true);
+ this.preferArchive = objectFactory.property(Boolean.class).convention(false);
this.extracted = extractedConfiguration;
this.distributionFinalizer = distributionFinalizer;
}
@@ -140,6 +143,14 @@ public void setFailIfUnavailable(boolean failIfUnavailable) {
this.failIfUnavailable.set(failIfUnavailable);
}
+ public boolean getPreferArchive() {
+ return preferArchive.get();
+ }
+
+ public void setPreferArchive(boolean preferArchive) {
+ this.preferArchive.set(preferArchive);
+ }
+
public void setArchitecture(Architecture architecture) {
this.architecture.set(architecture);
}
@@ -172,7 +183,7 @@ public String getFilepath() {
return configuration.getSingleFile().toString();
}
- public Configuration getExtracted() {
+ public ConfigurableFileCollection getExtracted() {
if (getType().shouldExtract() == false) {
throw new UnsupportedOperationException(
"distribution type [" + getType().getName() + "] for " + "elasticsearch distribution [" + name + "] cannot be extracted"
@@ -187,7 +198,9 @@ public TaskDependency getBuildDependencies() {
return task -> Collections.emptySet();
} else {
maybeFreeze();
- return getType().shouldExtract() ? extracted.getBuildDependencies() : configuration.getBuildDependencies();
+ return getType().shouldExtract() && (preferArchive.get() == false)
+ ? extracted.getBuildDependencies()
+ : configuration.getBuildDependencies();
}
}
@@ -252,13 +265,4 @@ void finalizeValues() {
type.finalizeValue();
bundledJdk.finalizeValue();
}
-
- public TaskDependency getArchiveDependencies() {
- if (skippingDockerDistributionBuild()) {
- return task -> Collections.emptySet();
- } else {
- maybeFreeze();
- return configuration.getBuildDependencies();
- }
- }
}
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java
index 5c98ab3bf4364..e80d2ed64cabd 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java
@@ -12,7 +12,7 @@
import java.util.Collection;
import java.util.HashSet;
-public class DefaultTestClustersTask extends DefaultTask implements TestClustersAware {
+public abstract class DefaultTestClustersTask extends DefaultTask implements TestClustersAware {
private Collection<ElasticsearchCluster> clusters = new HashSet<>();
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java
index 953c0447ec71b..b7d4f91ac6240 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java
@@ -194,7 +194,9 @@ public void beforeStart() {
} catch (IOException e) {
logger.warn("Unable to start APM server", e);
}
-
+ } else {
+ // metrics are enabled by default, if the --with-apm-server was not used we should disable it
+ node.setting("telemetry.metrics.enabled", "false");
}
}
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java
index 2bd8219dc48e5..ba2a5a20c4fbb 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java
@@ -8,11 +8,9 @@
package org.elasticsearch.gradle.testclusters;
import org.elasticsearch.gradle.FileSystemOperationsAware;
-import org.gradle.api.Task;
+import org.gradle.api.provider.ProviderFactory;
import org.gradle.api.services.internal.BuildServiceProvider;
import org.gradle.api.services.internal.BuildServiceRegistryInternal;
-import org.gradle.api.specs.NotSpec;
-import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.CacheableTask;
import org.gradle.api.tasks.Internal;
import org.gradle.api.tasks.Nested;
@@ -28,6 +26,8 @@
import java.util.HashSet;
import java.util.List;
+import javax.inject.Inject;
+
import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.THROTTLE_SERVICE_NAME;
/**
@@ -42,23 +42,6 @@ public abstract class StandaloneRestIntegTestTask extends Test implements TestCl
private boolean debugServer = false;
public StandaloneRestIntegTestTask() {
- Spec<Task> taskSpec = t -> getProject().getTasks()
- .withType(StandaloneRestIntegTestTask.class)
- .stream()
- .filter(task -> task != this)
- .anyMatch(task -> Collections.disjoint(task.getClusters(), getClusters()) == false);
- this.getOutputs()
- .doNotCacheIf(
- "Caching disabled for this task since it uses a cluster shared by other tasks",
- /*
- * Look for any other tasks which use the same cluster as this task. Since tests often have side effects for the cluster
- * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To
- * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between
- * multiple tasks.
- */
- taskSpec
- );
- this.getOutputs().upToDateWhen(new NotSpec(taskSpec));
this.getOutputs()
.doNotCacheIf(
"Caching disabled for this task since it is configured to preserve data directory",
@@ -79,6 +62,10 @@ public Collection getClusters() {
return clusters;
}
+ @Override
+ @Inject
+ public abstract ProviderFactory getProviderFactory();
+
@Override
@Internal
public List getSharedResources() {
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java
index 9537162b5d109..09066d4b26e88 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java
@@ -9,17 +9,24 @@
import org.gradle.api.Task;
import org.gradle.api.artifacts.Configuration;
+import org.gradle.api.provider.Property;
import org.gradle.api.provider.Provider;
+import org.gradle.api.services.ServiceReference;
import org.gradle.api.tasks.Nested;
import java.util.Collection;
import java.util.concurrent.Callable;
+import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.REGISTRY_SERVICE_NAME;
+
public interface TestClustersAware extends Task {
@Nested
Collection getClusters();
+ @ServiceReference(REGISTRY_SERVICE_NAME)
+ Property<TestClustersRegistry> getRegistery();
+
default void useCluster(ElasticsearchCluster cluster) {
if (cluster.getPath().equals(getProject().getPath()) == false) {
throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster);
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
index 72a462c3cd8c9..d2ccda1c1f8c7 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
@@ -37,6 +37,7 @@
import java.io.File;
import java.util.HashMap;
import java.util.Map;
+import java.util.Set;
import java.util.function.Function;
import javax.inject.Inject;
@@ -49,7 +50,7 @@ public class TestClustersPlugin implements Plugin {
public static final String THROTTLE_SERVICE_NAME = "testClustersThrottle";
private static final String LIST_TASK_NAME = "listTestClusters";
- private static final String REGISTRY_SERVICE_NAME = "testClustersRegistry";
+ public static final String REGISTRY_SERVICE_NAME = "testClustersRegistry";
private static final Logger logger = Logging.getLogger(TestClustersPlugin.class);
private final ProviderFactory providerFactory;
private Provider runtimeJavaProvider;
@@ -222,13 +223,21 @@ private void configureStartClustersHook(
testClusterTasksService.get().register(awareTask.getPath(), awareTask);
awareTask.doFirst(task -> {
awareTask.beforeStart();
- awareTask.getClusters().forEach(registry::maybeStartCluster);
+ awareTask.getClusters().forEach(awareTask.getRegistery().get()::maybeStartCluster);
});
});
});
}
}
+ public static void maybeStartCluster(ElasticsearchCluster cluster, Set runningClusters) {
+ if (runningClusters.contains(cluster)) {
+ return;
+ }
+ runningClusters.add(cluster);
+ cluster.start();
+ }
+
static public abstract class TaskEventsService implements BuildService, OperationCompletionListener {
Map tasksMap = new HashMap<>();
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java
index ce69c4ec476f9..00e5834b0f826 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java
@@ -13,7 +13,6 @@
import org.gradle.api.Task;
import org.gradle.api.UnknownTaskException;
import org.gradle.api.artifacts.Configuration;
-import org.gradle.api.artifacts.Dependency;
import org.gradle.api.artifacts.ModuleDependency;
import org.gradle.api.artifacts.ProjectDependency;
import org.gradle.api.plugins.JavaBasePlugin;
@@ -34,7 +33,6 @@
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
@@ -183,16 +181,6 @@ public static void extendSourceSet(Project project, String parentSourceSetName,
}
}
- public static Dependency projectDependency(Project project, String projectPath, String projectConfig) {
- if (project.findProject(projectPath) == null) {
- throw new GradleException("no project [" + projectPath + "], project names: " + project.getRootProject().getAllprojects());
- }
- Map depConfig = new HashMap<>();
- depConfig.put("path", projectPath);
- depConfig.put("configuration", projectConfig);
- return project.getDependencies().project(depConfig);
- }
-
/**
* To calculate the project path from a task path without relying on Task#getProject() which is discouraged during
* task execution time.
diff --git a/build.gradle b/build.gradle
index d05c2bf53f660..acd8d6788318f 100644
--- a/build.gradle
+++ b/build.gradle
@@ -161,8 +161,10 @@ tasks.register("verifyVersions") {
String versionMapping = backportConfig.get("branchLabelMapping").fields().find { it.value.textValue() == 'main' }.key
String expectedMapping = "^v${versions.elasticsearch.replaceAll('-SNAPSHOT', '')}\$"
if (versionMapping != expectedMapping) {
- throw new GradleException("Backport label mapping for branch 'main' is '${versionMapping}' but should be " +
- "'${expectedMapping}'. Update .backportrc.json.")
+ throw new GradleException(
+ "Backport label mapping for branch 'main' is '${versionMapping}' but should be " +
+ "'${expectedMapping}'. Update .backportrc.json."
+ )
}
}
}
@@ -211,9 +213,9 @@ allprojects {
project.ext {
// for ide hacks...
isEclipse = providers.systemProperty("eclipse.launcher").isPresent() || // Detects gradle launched from Eclipse's IDE
- providers.systemProperty("eclipse.application").isPresent() || // Detects gradle launched from the Eclipse compiler server
- gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff
- gradle.startParameter.taskNames.contains('cleanEclipse')
+ providers.systemProperty("eclipse.application").isPresent() || // Detects gradle launched from the Eclipse compiler server
+ gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff
+ gradle.startParameter.taskNames.contains('cleanEclipse')
}
ext.bwc_tests_enabled = bwc_tests_enabled
@@ -229,10 +231,10 @@ allprojects {
eclipse.classpath.file.whenMerged { classpath ->
if (false == forbiddenApisTest.bundledSignatures.contains('jdk-non-portable')) {
classpath.entries
- .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") }
- .each {
- it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*"))
- }
+ .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") }
+ .each {
+ it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*"))
+ }
}
}
}
@@ -248,6 +250,8 @@ allprojects {
plugins.withId('lifecycle-base') {
if (project.path.startsWith(":x-pack:")) {
if (project.path.contains("security") || project.path.contains(":ml")) {
+ tasks.register('checkPart4') { dependsOn 'check' }
+ } else if (project.path == ":x-pack:plugin" || project.path.contains("ql") || project.path.contains("smoke-test")) {
tasks.register('checkPart3') { dependsOn 'check' }
} else {
tasks.register('checkPart2') { dependsOn 'check' }
@@ -256,7 +260,7 @@ allprojects {
tasks.register('checkPart1') { dependsOn 'check' }
}
- tasks.register('functionalTests') { dependsOn 'check'}
+ tasks.register('functionalTests') { dependsOn 'check' }
}
/*
@@ -281,7 +285,7 @@ allprojects {
// :test:framework:test cannot run before and after :server:test
return
}
- tasks.matching { it.name.equals('integTest')}.configureEach {integTestTask ->
+ tasks.matching { it.name.equals('integTest') }.configureEach { integTestTask ->
integTestTask.mustRunAfter tasks.matching { it.name.equals("test") }
}
@@ -290,7 +294,7 @@ allprojects {
Project upstreamProject = dep.dependencyProject
if (project.path != upstreamProject?.path) {
for (String taskName : ['test', 'integTest']) {
- project.tasks.matching { it.name == taskName }.configureEach {task ->
+ project.tasks.matching { it.name == taskName }.configureEach { task ->
task.shouldRunAfter(upstreamProject.tasks.matching { upStreamTask -> upStreamTask.name == taskName })
}
}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
index fca1e5d29efaf..fdbb5d0c86d6f 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
@@ -18,7 +18,6 @@
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
@@ -241,7 +240,7 @@ static Request search(SearchRequest searchRequest, String searchEndpoint) throws
return request;
}
- static void addSearchRequestParams(Params params, SearchRequest searchRequest) {
+ private static void addSearchRequestParams(Params params, SearchRequest searchRequest) {
params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true");
params.withRouting(searchRequest.routing());
params.withPreference(searchRequest.preference());
@@ -268,53 +267,28 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) {
}
}
- static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException {
- Request request = new Request(HttpPost.METHOD_NAME, "/_search/scroll");
- request.setEntity(createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE));
- return request;
- }
-
- static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
+ private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
return createEntity(toXContent, xContentType, ToXContent.EMPTY_PARAMS);
}
- static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams)
+ private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams)
throws IOException {
BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef();
return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
}
- @Deprecated
- static String endpoint(String index, String type, String id) {
+ private static String endpoint(String index, String type, String id) {
return new EndpointBuilder().addPathPart(index, type, id).build();
}
- static String endpoint(String index, String id) {
+ private static String endpoint(String index, String id) {
return new EndpointBuilder().addPathPart(index, "_doc", id).build();
}
- @Deprecated
- static String endpoint(String index, String type, String id, String endpoint) {
- return new EndpointBuilder().addPathPart(index, type, id).addPathPartAsIs(endpoint).build();
- }
-
- static String endpoint(String[] indices, String endpoint) {
+ private static String endpoint(String[] indices, String endpoint) {
return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).build();
}
- @Deprecated
- static String endpoint(String[] indices, String[] types, String endpoint) {
- return new EndpointBuilder().addCommaSeparatedPathParts(indices)
- .addCommaSeparatedPathParts(types)
- .addPathPartAsIs(endpoint)
- .build();
- }
-
- @Deprecated
- static String endpoint(String[] indices, String endpoint, String type) {
- return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).addPathPart(type).build();
- }
-
/**
* Returns a {@link ContentType} from a given {@link XContentType}.
*
@@ -322,7 +296,7 @@ static String endpoint(String[] indices, String endpoint, String type) {
* @return the {@link ContentType}
*/
@SuppressForbidden(reason = "Only allowed place to convert a XContentType to a ContentType")
- public static ContentType createContentType(final XContentType xContentType) {
+ private static ContentType createContentType(final XContentType xContentType) {
return ContentType.create(xContentType.mediaTypeWithoutParameters(), (Charset) null);
}
@@ -330,7 +304,7 @@ public static ContentType createContentType(final XContentType xContentType) {
* Utility class to help with common parameter names and patterns. Wraps
* a {@link Request} and adds the parameters to it directly.
*/
- static class Params {
+ private static class Params {
private final Map parameters = new HashMap<>();
Params() {}
@@ -478,7 +452,7 @@ Params withIgnoreUnavailable(boolean ignoreUnavailable) {
*
* @return the {@link IndexRequest}'s content type
*/
- static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) {
+ private static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) {
XContentType requestContentType = indexRequest.getContentType();
if (requestContentType.canonical() != XContentType.JSON && requestContentType.canonical() != XContentType.SMILE) {
throw new IllegalArgumentException(
@@ -505,7 +479,7 @@ static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable
/**
* Utility class to build request's endpoint given its parts as strings
*/
- static class EndpointBuilder {
+ private static class EndpointBuilder {
private final StringJoiner joiner = new StringJoiner("/", "/", "");
@@ -532,7 +506,7 @@ EndpointBuilder addPathPartAsIs(String... parts) {
return this;
}
- String build() {
+ private String build() {
return joiner.toString();
}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index b0998957910a2..5d779ea17f534 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -23,7 +23,6 @@
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder;
import org.elasticsearch.aggregations.bucket.adjacency.ParsedAdjacencyMatrix;
@@ -159,7 +158,6 @@
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -180,29 +178,6 @@
* High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. The
* {@link RestClient} instance is internally built based on the provided {@link RestClientBuilder} and it gets closed automatically when
* closing the {@link RestHighLevelClient} instance that wraps it.
- *
- *
- * In case an already existing instance of a low-level REST client needs to be provided, this class can be subclassed and the
- * {@link #RestHighLevelClient(RestClient, CheckedConsumer, List)} constructor can be used.
- *
- *
- * This class can also be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through plugins,
- * or to add support for custom response sections, again added to Elasticsearch through plugins.
- *
- *
- * The majority of the methods in this class come in two flavors, a blocking and an asynchronous version (e.g.
- * {@link #search(SearchRequest, RequestOptions)} and {@link #searchAsync(SearchRequest, RequestOptions, ActionListener)}, where the later
- * takes an implementation of an {@link ActionListener} as an argument that needs to implement methods that handle successful responses and
- * failure scenarios. Most of the blocking calls can throw an {@link IOException} or an unchecked {@link ElasticsearchException} in the
- * following cases:
- *
- *
- *
an {@link IOException} is usually thrown in case of failing to parse the REST response in the high-level REST client, the request
- * times out or similar cases where there is no response coming back from the Elasticsearch server
- *
an {@link ElasticsearchException} is usually thrown in case where the server returns a 4xx or 5xx error code. The high-level client
- * then tries to parse the response body error details into a generic ElasticsearchException and suppresses the original
- * {@link ResponseException}
- *
*
* @deprecated The High Level Rest Client is deprecated in favor of the
*
@@ -216,7 +191,7 @@ public class RestHighLevelClient implements Closeable {
/**
* Environment variable determining whether to send the 7.x compatibility header
*/
- public static final String API_VERSIONING_ENV_VARIABLE = "ELASTIC_CLIENT_APIVERSIONING";
+ private static final String API_VERSIONING_ENV_VARIABLE = "ELASTIC_CLIENT_APIVERSIONING";
// To be called using performClientRequest and performClientRequestAsync to ensure version compatibility check
private final RestClient client;
@@ -227,14 +202,6 @@ public class RestHighLevelClient implements Closeable {
/** Do not access directly but through getVersionValidationFuture() */
private volatile ListenableFuture> versionValidationFuture;
- /**
- * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the
- * {@link RestClient} to be used to perform requests.
- */
- public RestHighLevelClient(RestClientBuilder restClientBuilder) {
- this(restClientBuilder.build(), RestClient::close, Collections.emptyList());
- }
-
/**
* Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and
* a list of entries that allow to parse custom response sections added to Elasticsearch through plugins.
@@ -331,23 +298,6 @@ public final IndexResponse index(IndexRequest indexRequest, RequestOptions optio
return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet());
}
- /**
- * Executes a search request using the Search API.
- * See Search API on elastic.co
- * @param searchRequest the request
- * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
- * @return the response
- */
- public final SearchResponse search(SearchRequest searchRequest, RequestOptions options) throws IOException {
- return performRequestAndParseEntity(
- searchRequest,
- r -> RequestConverters.search(r, "_search"),
- options,
- SearchResponse::fromXContent,
- emptySet()
- );
- }
-
/**
* Asynchronously executes a search using the Search API.
* See Search API on elastic.co
@@ -368,27 +318,7 @@ public final Cancellable searchAsync(SearchRequest searchRequest, RequestOptions
}
/**
- * Executes a search using the Search Scroll API.
- * See Search
- * Scroll API on elastic.co
- * @param searchScrollRequest the request
- * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
- * @return the response
- */
- public final SearchResponse scroll(SearchScrollRequest searchScrollRequest, RequestOptions options) throws IOException {
- return performRequestAndParseEntity(
- searchScrollRequest,
- RequestConverters::searchScroll,
- options,
- SearchResponse::fromXContent,
- emptySet()
- );
- }
-
- /**
- * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
- * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
+ * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions.
*/
@Deprecated
private Resp performRequestAndParseEntity(
@@ -402,8 +332,7 @@ private Resp performRequestAndParseEntity(
}
/**
- * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
- * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
+ * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions.
*/
@Deprecated
private Resp performRequest(
@@ -458,8 +387,7 @@ private Resp internalPerformRequest(
}
/**
- * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
- * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
+ * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions.
* @return Cancellable instance that may be used to cancel the request
*/
@Deprecated
@@ -482,8 +410,7 @@ private Cancellable performRequestAsyncAndPars
}
/**
- * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
- * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
+ * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions.
* @return Cancellable instance that may be used to cancel the request
*/
@Deprecated
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java
deleted file mode 100644
index b7635f7054299..0000000000000
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-package org.elasticsearch.client;
-
-import java.util.Optional;
-
-/**
- * Defines a validation layer for Requests.
- */
-public interface Validatable {
-
- Validatable EMPTY = new Validatable() {
- };
-
- /**
- * Perform validation. This method does not have to be overridden in the event that no validation needs to be done,
- * or the validation was done during object construction time. A {@link ValidationException} that is not null is
- * assumed to contain validation errors and will be thrown.
- *
- * @return An {@link Optional} {@link ValidationException} that contains a list of validation errors.
- */
- default Optional validate() {
- return Optional.empty();
- }
-}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java
deleted file mode 100644
index d5701c5723096..0000000000000
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-package org.elasticsearch.client;
-
-import org.elasticsearch.core.Nullable;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * Encapsulates an accumulation of validation errors
- */
-public class ValidationException extends IllegalArgumentException {
-
- /**
- * Creates {@link ValidationException} instance initialized with given error messages.
- * @param error the errors to add
- * @return {@link ValidationException} instance
- */
- public static ValidationException withError(String... error) {
- return withErrors(Arrays.asList(error));
- }
-
- /**
- * Creates {@link ValidationException} instance initialized with given error messages.
- * @param errors the list of errors to add
- * @return {@link ValidationException} instance
- */
- public static ValidationException withErrors(List errors) {
- ValidationException e = new ValidationException();
- for (String error : errors) {
- e.addValidationError(error);
- }
- return e;
- }
-
- private final List validationErrors = new ArrayList<>();
-
- /**
- * Add a new validation error to the accumulating validation errors
- * @param error the error to add
- */
- public void addValidationError(final String error) {
- validationErrors.add(error);
- }
-
- /**
- * Adds validation errors from an existing {@link ValidationException} to
- * the accumulating validation errors
- * @param exception the {@link ValidationException} to add errors from
- */
- public final void addValidationErrors(final @Nullable ValidationException exception) {
- if (exception != null) {
- for (String error : exception.validationErrors()) {
- addValidationError(error);
- }
- }
- }
-
- /**
- * Returns the validation errors accumulated
- */
- public final List validationErrors() {
- return validationErrors;
- }
-
- @Override
- public final String getMessage() {
- StringBuilder sb = new StringBuilder();
- sb.append("Validation Failed: ");
- int index = 0;
- for (String error : validationErrors) {
- sb.append(++index).append(": ").append(error).append(";");
- }
- return sb.toString();
- }
-}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java
index 13a72ee64c03f..f28aabe41f4a9 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java
@@ -22,7 +22,6 @@
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
-import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.xcontent.ParseField;
@@ -71,12 +70,6 @@ public String getType() {
return NAME;
}
- @Override
- protected ValuesSourceRegistry.RegistryKey> getRegistryKey() {
- // This would be called from the same thing that calls innerBuild, which also throws. So it's "safe" to throw here.
- throw new UnsupportedOperationException();
- }
-
@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
return builder.field(StringStatsAggregationBuilder.SHOW_DISTRIBUTION_FIELD.getPreferredName(), showDistribution);
diff --git a/distribution/build.gradle b/distribution/build.gradle
index 90af1472deb2e..e45f1d09625d6 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -14,6 +14,7 @@ import org.elasticsearch.gradle.internal.ConcatFilesTask
import org.elasticsearch.gradle.internal.DependenciesInfoPlugin
import org.elasticsearch.gradle.internal.NoticeTask
import org.elasticsearch.gradle.internal.info.BuildParams
+import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin
import java.nio.file.Files
import java.nio.file.Path
@@ -30,6 +31,15 @@ configurations {
attribute(Category.CATEGORY_ATTRIBUTE, project.getObjects().named(Category.class, Category.DOCUMENTATION))
}
}
+ featuresMetadata {
+ attributes {
+ attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE)
+ }
+ }
+}
+
+dependencies {
+ featuresMetadata project(':server')
}
def thisProj = project
@@ -196,6 +206,7 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each {
}
distro.copyModule(processDefaultOutputsTaskProvider, module)
+ dependencies.add('featuresMetadata', module)
if (module.name.startsWith('transport-') || (BuildParams.snapshotBuild == false && module.name == 'apm')) {
distro.copyModule(processIntegTestOutputsTaskProvider, module)
}
@@ -214,6 +225,7 @@ xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule ->
}
}
distro.copyModule(processDefaultOutputsTaskProvider, xpackModule)
+ dependencies.add('featuresMetadata', xpackModule)
if (xpackModule.name.equals('core') || xpackModule.name.equals('security')) {
distro.copyModule(processIntegTestOutputsTaskProvider, xpackModule)
}
diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle
index 39f9bbf536dda..0a47d0652e465 100644
--- a/distribution/tools/java-version-checker/build.gradle
+++ b/distribution/tools/java-version-checker/build.gradle
@@ -8,15 +8,17 @@ tasks.named(sourceSets.unsupportedJdkVersionEntrypoint.compileJavaTaskName).conf
targetCompatibility = JavaVersion.VERSION_1_8
}
+
tasks.named("jar") {
manifest {
attributes("Multi-Release": "true")
}
+ FileCollection mainOutput = sourceSets.main.output;
from(sourceSets.unsupportedJdkVersionEntrypoint.output)
eachFile { details ->
if (details.path.equals("org/elasticsearch/tools/java_version_checker/JavaVersionChecker.class") &&
- sourceSets.main.output.asFileTree.contains(details.file)) {
+ mainOutput.asFileTree.contains(details.file)) {
details.relativePath = details.relativePath.prepend("META-INF/versions/17")
}
}
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java
index b6cd680cb5816..9dcd630f52631 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java
@@ -145,7 +145,7 @@ static List apmJvmOptions(Settings settings, @Nullable SecureSettings se
// Configures a log file to write to. Don't disable writing to a log file,
// as the agent will then require extra Security Manager permissions when
// it tries to do something else, and it's just painful.
- propertiesMap.put("log_file", logsDir.resolve("apm-agent.log").toString());
+ propertiesMap.put("log_file", logsDir.resolve("apm-agent.json").toString());
// No point doing anything if we don't have a destination for the trace data, and it can't be configured dynamically
if (propertiesMap.containsKey("server_url") == false && propertiesMap.containsKey("server_urls") == false) {
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java
index 5999f618bc0ab..29650e4b74114 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java
@@ -137,7 +137,7 @@ private List jvmOptions(
);
substitutedJvmOptions.addAll(machineDependentHeap.determineHeapSettings(config, substitutedJvmOptions));
final List ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions);
- final List systemJvmOptions = SystemJvmOptions.systemJvmOptions();
+ final List systemJvmOptions = SystemJvmOptions.systemJvmOptions(args.nodeSettings());
final List apmOptions = APMJvmOptions.apmJvmOptions(args.nodeSettings(), args.secrets(), args.logsDir(), tmpDir);
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
index a55a303517d6f..6e250075f7747 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
@@ -8,13 +8,16 @@
package org.elasticsearch.server.cli;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
final class SystemJvmOptions {
- static List systemJvmOptions() {
+ static List systemJvmOptions(Settings nodeSettings) {
return Stream.of(
/*
* Cache ttl in seconds for positive DNS lookups noting that this overrides the JDK security property networkaddress.cache.ttl;
@@ -61,7 +64,8 @@ static List systemJvmOptions() {
* explore alternatives. See org.elasticsearch.xpack.searchablesnapshots.preallocate.Preallocate.
*/
"--add-opens=java.base/java.io=org.elasticsearch.preallocate",
- maybeOverrideDockerCgroup()
+ maybeOverrideDockerCgroup(),
+ maybeSetActiveProcessorCount(nodeSettings)
).filter(e -> e.isEmpty() == false).collect(Collectors.toList());
}
@@ -85,4 +89,16 @@ private static String maybeOverrideDockerCgroup() {
}
return "";
}
+
+ /*
+ * node.processors determines thread pool sizes for Elasticsearch. When it
+ * is set, we need to also tell the JVM to respect a different value
+ */
+ private static String maybeSetActiveProcessorCount(Settings nodeSettings) {
+ if (EsExecutors.NODE_PROCESSORS_SETTING.exists(nodeSettings)) {
+ int allocated = EsExecutors.allocatedProcessors(nodeSettings);
+ return "-XX:ActiveProcessorCount=" + allocated;
+ }
+ return "";
+ }
}
diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java
index 5d63f29ac584e..03856b1024992 100644
--- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java
+++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java
@@ -8,6 +8,8 @@
package org.elasticsearch.server.cli;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.Strings;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTestCase.WithoutSecurityManager;
@@ -28,10 +30,13 @@
import java.util.concurrent.atomic.AtomicBoolean;
import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.not;
@WithoutSecurityManager
public class JvmOptionsParserTests extends ESTestCase {
@@ -344,4 +349,27 @@ public void accept(final int lineNumber, final String line) {
assertThat(seenInvalidLines, equalTo(invalidLines));
}
+ public void testNodeProcessorsActiveCount() {
+ {
+ final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY);
+ assertThat(jvmOptions, not(hasItem(containsString("-XX:ActiveProcessorCount="))));
+ }
+ {
+ Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 1).build();
+ final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings);
+ assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1"));
+ }
+ {
+ // check rounding
+ Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 0.2).build();
+ final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings);
+ assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1"));
+ }
+ {
+ // check validation
+ Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 10000).build();
+ var e = expectThrows(IllegalArgumentException.class, () -> SystemJvmOptions.systemJvmOptions(nodeSettings));
+ assertThat(e.getMessage(), containsString("setting [node.processors] must be <="));
+ }
+ }
}
diff --git a/docs/changelog/100018.yaml b/docs/changelog/100018.yaml
deleted file mode 100644
index b39089db568c0..0000000000000
--- a/docs/changelog/100018.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100018
-summary: Improve time-series error and documentation
-area: "TSDB"
-type: enhancement
-issues: []
diff --git a/docs/changelog/100020.yaml b/docs/changelog/100020.yaml
deleted file mode 100644
index 9f97778860eef..0000000000000
--- a/docs/changelog/100020.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100020
-summary: "[CI] `SearchResponseTests#testSerialization` failing resolved"
-area: Search
-type: bug
-issues:
- - 100005
diff --git a/docs/changelog/100064.yaml b/docs/changelog/100064.yaml
deleted file mode 100644
index f595b7e8e0705..0000000000000
--- a/docs/changelog/100064.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100064
-summary: Update the elastic-apm-agent version
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/100092.yaml b/docs/changelog/100092.yaml
deleted file mode 100644
index e86b856caf3ad..0000000000000
--- a/docs/changelog/100092.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100092
-summary: Compute SLM retention from `RepositoryData`
-area: ILM+SLM
-type: bug
-issues:
- - 99953
diff --git a/docs/changelog/100129.yaml b/docs/changelog/100129.yaml
deleted file mode 100644
index aa2c6961b6681..0000000000000
--- a/docs/changelog/100129.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100129
-summary: Refactor `SearchResponseClusters` to use CHM
-area: Search
-type: enhancement
-issues:
- - 99101
diff --git a/docs/changelog/100138.yaml b/docs/changelog/100138.yaml
deleted file mode 100644
index 0df2004f8539d..0000000000000
--- a/docs/changelog/100138.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100138
-summary: Upgrade main to Lucene 9.8.0
-area: Search
-type: upgrade
-issues: []
diff --git a/docs/changelog/100143.yaml b/docs/changelog/100143.yaml
deleted file mode 100644
index c61a2a8bc7a13..0000000000000
--- a/docs/changelog/100143.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100143
-summary: Preserve order of inference results when calling the _infer API with multiple inputs on a model deployment with more than one allocation the output results order was not guaranteed to match the input order. The fix ensures the output order matches the input order.
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/100154.yaml b/docs/changelog/100154.yaml
deleted file mode 100644
index 5e75102390c61..0000000000000
--- a/docs/changelog/100154.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100154
-summary: Log warnings for jobs unassigned for a long time
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/100187.yaml b/docs/changelog/100187.yaml
deleted file mode 100644
index f0ab9257e7127..0000000000000
--- a/docs/changelog/100187.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-pr: 100187
-summary: GA the data stream lifecycle
-area: Data streams
-type: "feature"
-issues: []
-highlight:
- title: The data stream lifecycle is now in Technical Preview
- body: "This marks the data stream lifecycle as available in Technical Preview.
- Data streams will be able to take advantage of a built-in simplified and
- resilient lifecycle implementation. Data streams with a configured lifecycle will
- be automatically rolled over and tail merged (a forcemerge implementation that's
- lightweight and only merges the long tail of small segments instead of the
- whole shard). With the shard and index maintenance tasks being handled automatically
- to ensure optimum performance, and trade-off between indexing and searching,
- you'll be able to focus on the business related lifecycle aspects like data
- retention."
- notable: true
diff --git a/docs/changelog/100199.yaml b/docs/changelog/100199.yaml
deleted file mode 100644
index 0f609194813c5..0000000000000
--- a/docs/changelog/100199.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100199
-summary: "ESQL: Simple check if all blocks get released"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/100205.yaml b/docs/changelog/100205.yaml
deleted file mode 100644
index 41b16465ef4c5..0000000000000
--- a/docs/changelog/100205.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100205
-summary: Simplify the Inference Ingest Processor configuration
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/100232.yaml b/docs/changelog/100232.yaml
deleted file mode 100644
index 3f8336b6c241c..0000000000000
--- a/docs/changelog/100232.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100232
-summary: "Tracing: Use `doPriv` when working with spans, use `SpanId`"
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/100238.yaml b/docs/changelog/100238.yaml
deleted file mode 100644
index 70e3f5340e223..0000000000000
--- a/docs/changelog/100238.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100238
-summary: "ESQL: Remove aliasing inside Eval"
-area: ES|QL
-type: bug
-issues:
- - 100174
diff --git a/docs/changelog/100253.yaml b/docs/changelog/100253.yaml
deleted file mode 100644
index 7a9d3f3fb13d7..0000000000000
--- a/docs/changelog/100253.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100253
-summary: Propagate cancellation in `DataTiersUsageTransportAction`
-area: Data streams
-type: bug
-issues: []
diff --git a/docs/changelog/100273.yaml b/docs/changelog/100273.yaml
deleted file mode 100644
index 4ccd52d033aa7..0000000000000
--- a/docs/changelog/100273.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100273
-summary: Propagate cancellation in `GetHealthAction`
-area: Health
-type: bug
-issues: []
diff --git a/docs/changelog/100323.yaml b/docs/changelog/100323.yaml
deleted file mode 100644
index de50da6ec8cf9..0000000000000
--- a/docs/changelog/100323.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100323
-summary: "CCR: Use local cluster state request"
-area: CCR
-type: bug
-issues: []
diff --git a/docs/changelog/100351.yaml b/docs/changelog/100351.yaml
deleted file mode 100644
index d8ba19b70cbed..0000000000000
--- a/docs/changelog/100351.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100351
-summary: "ESQL: support metric tsdb fields while querying index patterns"
-area: ES|QL
-type: bug
-issues:
- - 100144
diff --git a/docs/changelog/100360.yaml b/docs/changelog/100360.yaml
deleted file mode 100644
index 6d0dcafe16a8f..0000000000000
--- a/docs/changelog/100360.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100360
-summary: "ESQL: Limit how many bytes `concat()` can process"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/100370.yaml b/docs/changelog/100370.yaml
deleted file mode 100644
index 3e2e1b762c654..0000000000000
--- a/docs/changelog/100370.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 100370
-summary: "ESQL: Page shouldn't close a block twice"
-area: ES|QL
-type: bug
-issues:
- - 100356
- - 100365
diff --git a/docs/changelog/100377.yaml b/docs/changelog/100377.yaml
deleted file mode 100644
index a4cbb0ba46a61..0000000000000
--- a/docs/changelog/100377.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100377
-summary: "ESQL: Add identity check in Block equality"
-area: ES|QL
-type: bug
-issues:
- - 100374
diff --git a/docs/changelog/100388.yaml b/docs/changelog/100388.yaml
deleted file mode 100644
index 4b596b6ea23b6..0000000000000
--- a/docs/changelog/100388.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100388
-summary: Fix for inference requests being sent to every node with a model allocation. If there are more nodes than items in the original request then empty requests were sent.
-area: Machine Learning
-type: bug
-issues:
- - 100180
diff --git a/docs/changelog/100447.yaml b/docs/changelog/100447.yaml
deleted file mode 100644
index c20eb1599cf41..0000000000000
--- a/docs/changelog/100447.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100447
-summary: Reinstate `RepositoryData` BwC
-area: Snapshot/Restore
-type: bug
-issues: []
diff --git a/docs/changelog/100470.yaml b/docs/changelog/100470.yaml
deleted file mode 100644
index 3408ae06f7fe9..0000000000000
--- a/docs/changelog/100470.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100470
-summary: DSL waits for the tsdb time boundaries to lapse
-area: Data streams
-type: bug
-issues:
- - 99696
diff --git a/docs/changelog/100594.yaml b/docs/changelog/100594.yaml
deleted file mode 100644
index 62d2a8933b9ad..0000000000000
--- a/docs/changelog/100594.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100594
-summary: Grant editor and viewer access to profiling
-area: Authorization
-type: bug
-issues: []
diff --git a/docs/changelog/100610.yaml b/docs/changelog/100610.yaml
deleted file mode 100644
index 7423ce9225868..0000000000000
--- a/docs/changelog/100610.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 100610
-summary: Fix interruption of `markAllocationIdAsInSync`
-area: Recovery
-type: bug
-issues:
- - 96578
- - 100589
diff --git a/docs/changelog/100624.yaml b/docs/changelog/100624.yaml
deleted file mode 100644
index 247343bf03ed8..0000000000000
--- a/docs/changelog/100624.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100624
-summary: Make Transform Feature Reset really wait for all the tasks
-area: Transform
-type: bug
-issues: []
diff --git a/docs/changelog/100645.yaml b/docs/changelog/100645.yaml
deleted file mode 100644
index e6bb6ab0fd653..0000000000000
--- a/docs/changelog/100645.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 100645
-summary: "ESQL: Graceful handling of non-bool condition in the filter"
-area: ES|QL
-type: bug
-issues:
- - 100049
- - 100409
diff --git a/docs/changelog/100647.yaml b/docs/changelog/100647.yaml
deleted file mode 100644
index 399407146af68..0000000000000
--- a/docs/changelog/100647.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100647
-summary: "ESQL: Handle queries with non-existing enrich policies and no field"
-area: ES|QL
-type: bug
-issues:
- - 100593
diff --git a/docs/changelog/100650.yaml b/docs/changelog/100650.yaml
deleted file mode 100644
index 96d7bc0571403..0000000000000
--- a/docs/changelog/100650.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100650
-summary: "ESQL: Improve verifier error for incorrect agg declaration"
-area: ES|QL
-type: bug
-issues:
- - 100641
diff --git a/docs/changelog/100656.yaml b/docs/changelog/100656.yaml
deleted file mode 100644
index 1ee9a2ad0e47a..0000000000000
--- a/docs/changelog/100656.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100656
-summary: "ESQL: fix non-null value being returned for unsupported data types in `ValueSources`"
-area: ES|QL
-type: bug
-issues:
- - 100048
diff --git a/docs/changelog/100707.yaml b/docs/changelog/100707.yaml
deleted file mode 100644
index 6808b781b603a..0000000000000
--- a/docs/changelog/100707.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100707
-summary: Allow `enrich_user` to read/view enrich indices
-area: Authorization
-type: bug
-issues: []
diff --git a/docs/changelog/100760.yaml b/docs/changelog/100760.yaml
deleted file mode 100644
index b8d149fff5758..0000000000000
--- a/docs/changelog/100760.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100760
-summary: Remove noisy 'Could not find trained model' message
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/100766.yaml b/docs/changelog/100766.yaml
deleted file mode 100644
index c7a3d0479afd6..0000000000000
--- a/docs/changelog/100766.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100766
-summary: "ESQL: Properly handle multi-values in fold() and date math"
-area: ES|QL
-type: bug
-issues:
- - 100497
diff --git a/docs/changelog/100779.yaml b/docs/changelog/100779.yaml
deleted file mode 100644
index 2d7f40f5b34da..0000000000000
--- a/docs/changelog/100779.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100779
-summary: Fix NullPointerException in RotableSecret
-area: Security
-type: bug
-issues:
- - 99759
diff --git a/docs/changelog/100782.yaml b/docs/changelog/100782.yaml
deleted file mode 100644
index c6007bfb4d9ba..0000000000000
--- a/docs/changelog/100782.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-pr: 100782
-summary: "ESQL: `mv_expand` pushes down limit and project and keep the limit after\
- \ it untouched"
-area: ES|QL
-type: bug
-issues:
- - 99971
- - 100774
diff --git a/docs/changelog/100808.yaml b/docs/changelog/100808.yaml
deleted file mode 100644
index 1abbfdcebf74e..0000000000000
--- a/docs/changelog/100808.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100808
-summary: Make tasks that calculate checkpoints cancellable
-area: Transform
-type: bug
-issues: []
diff --git a/docs/changelog/100846.yaml b/docs/changelog/100846.yaml
deleted file mode 100644
index d13fb78b697a2..0000000000000
--- a/docs/changelog/100846.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100846
-summary: Consistent scores for multi-term `SourceConfirmedTestQuery`
-area: Search
-type: bug
-issues:
- - 98712
diff --git a/docs/changelog/100866.yaml b/docs/changelog/100866.yaml
deleted file mode 100644
index 67a22cc1e0996..0000000000000
--- a/docs/changelog/100866.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100866
-summary: "ESQL: Preserve intermediate aggregation output in local relation"
-area: ES|QL
-type: bug
-issues:
- - 100807
diff --git a/docs/changelog/100872.yaml b/docs/changelog/100872.yaml
deleted file mode 100644
index 9877afa28982e..0000000000000
--- a/docs/changelog/100872.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100872
-summary: Improve painless error wrapping
-area: Infra/Scripting
-type: bug
-issues: []
diff --git a/docs/changelog/100875.yaml b/docs/changelog/100875.yaml
deleted file mode 100644
index bd0ca59e8b8f0..0000000000000
--- a/docs/changelog/100875.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100875
-summary: Preserve subfields for unsupported types
-area: "Query Languages"
-type: bug
-issues:
- - 100869
diff --git a/docs/changelog/100886.yaml b/docs/changelog/100886.yaml
deleted file mode 100644
index b926f924c7a7c..0000000000000
--- a/docs/changelog/100886.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 100886
-summary: Use the correct writable name for model assignment metadata in mixed version clusters. Prevents a node failure due to IllegalArgumentException Unknown NamedWriteable [trained_model_assignment]
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/100911.yaml b/docs/changelog/100911.yaml
deleted file mode 100644
index baab6f2482a76..0000000000000
--- a/docs/changelog/100911.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100911
-summary: '`WaitForSnapshotStep` verifies if the index belongs to the latest snapshot
- of that SLM policy'
-area: ILM+SLM
-type: bug
-issues: []
diff --git a/docs/changelog/101001.yaml b/docs/changelog/101001.yaml
deleted file mode 100644
index 3ebcefc2c8045..0000000000000
--- a/docs/changelog/101001.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101001
-summary: "ESQL: Support date and time intervals as input params"
-area: ES|QL
-type: bug
-issues:
- - 99570
diff --git a/docs/changelog/101012.yaml b/docs/changelog/101012.yaml
deleted file mode 100644
index 1d5f62bdddba7..0000000000000
--- a/docs/changelog/101012.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101012
-summary: Adjust `DateHistogram's` bucket accounting to be iteratively
-area: Aggregations
-type: bug
-issues: []
diff --git a/docs/changelog/101051.yaml b/docs/changelog/101051.yaml
deleted file mode 100644
index 05e7443dac8b3..0000000000000
--- a/docs/changelog/101051.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101051
-summary: Percolator to support parsing script score query with params
-area: Mapping
-type: bug
-issues:
- - 97377
diff --git a/docs/changelog/101120.yaml b/docs/changelog/101120.yaml
deleted file mode 100644
index bf359eb21be9f..0000000000000
--- a/docs/changelog/101120.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101120
-summary: "ESQL: Fix escaping of backslash in LIKE operator"
-area: ES|QL
-type: bug
-issues:
- - 101106
diff --git a/docs/changelog/101133.yaml b/docs/changelog/101133.yaml
deleted file mode 100644
index 546a5392c309a..0000000000000
--- a/docs/changelog/101133.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101133
-summary: Update bundled JDK to 21.0.1
-area: Packaging
-type: upgrade
-issues: []
diff --git a/docs/changelog/101184.yaml b/docs/changelog/101184.yaml
deleted file mode 100644
index ac2f5f3ee8af1..0000000000000
--- a/docs/changelog/101184.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101184
-summary: More robust timeout for repo analysis
-area: Snapshot/Restore
-type: bug
-issues:
- - 101182
diff --git a/docs/changelog/101205.yaml b/docs/changelog/101205.yaml
deleted file mode 100644
index 528f6fb35846e..0000000000000
--- a/docs/changelog/101205.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101205
-summary: Increase K/V look-back time interval
-area: Application
-type: bug
-issues: []
diff --git a/docs/changelog/101212.yaml b/docs/changelog/101212.yaml
deleted file mode 100644
index ed2b433209e8d..0000000000000
--- a/docs/changelog/101212.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101212
-summary: Fix painless execute api and tsdb issue
-area: TSDB
-type: bug
-issues:
- - 101072
diff --git a/docs/changelog/101245.yaml b/docs/changelog/101245.yaml
deleted file mode 100644
index 2f9fef318f31a..0000000000000
--- a/docs/changelog/101245.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101245
-summary: Make S3 anti-contention delay configurable
-area: Snapshot/Restore
-type: bug
-issues: []
diff --git a/docs/changelog/101255.yaml b/docs/changelog/101255.yaml
deleted file mode 100644
index 37d8f7e3c14fe..0000000000000
--- a/docs/changelog/101255.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101255
-summary: Provide stable resampling
-area: Application
-type: bug
-issues: []
diff --git a/docs/changelog/101264.yaml b/docs/changelog/101264.yaml
deleted file mode 100644
index 7160240b2f3a0..0000000000000
--- a/docs/changelog/101264.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101264
-summary: Align look-back with client-side cache
-area: Application
-type: bug
-issues: []
diff --git a/docs/changelog/101265.yaml b/docs/changelog/101265.yaml
deleted file mode 100644
index f39b57fa9a75e..0000000000000
--- a/docs/changelog/101265.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-pr: 101265
-summary: Rollup functionality is now deprecated
-area: Rollup
-type: deprecation
-issues: []
-deprecation:
- title: >-
- Rollup functionality is now deprecated
- area: Rollup
- details: |-
- {ref}/xpack-rollup[Rollup functionality] has been deprecated and will be removed in a future release. Previously, rollups were available in technical preview.
- impact: |-
- Use {ref}/downsampling.html[downsampling] to reduce storage costs for time series data by by storing it at reduced granularity.
diff --git a/docs/changelog/101333.yaml b/docs/changelog/101333.yaml
new file mode 100644
index 0000000000000..4452687b995d3
--- /dev/null
+++ b/docs/changelog/101333.yaml
@@ -0,0 +1,29 @@
+pr: 101333
+summary: Fixed JWT principal from claims
+area: Authorization
+type: breaking
+issues: []
+breaking:
+ title: Fixed JWT principal from claims
+ area: Authorization
+ details: "This changes the format of a JWT's principal before the JWT is actually\
+ \ validated by any JWT realm. The JWT's principal is a convenient way to refer\
+ \ to a JWT that has not yet been verified by a JWT realm. The JWT's principal\
+ \ is printed in the audit and regular logs (notably for auditing authn failures)\
+ \ as well as the smart realm chain reordering optimization. The JWT principal\
+ \ is NOT required to be identical to the JWT-authenticated user's principal, but\
+ \ in general, they should be similar. Previously, the JWT's principal was built\
+ \ by individual realms in the same way the realms built the authenticated user's\
+ \ principal. This had the advantage that, in simpler JWT realms configurations\
+ \ (e.g. a single JWT realm in the chain), the JWT principal and the authenticated\
+ \ user's principal are very similar. However the drawback is that, in general,\
+ \ the JWT principal and the user principal can be very different (i.e. in the\
+ \ case where one JWT realm builds the JWT principal and a different one builds\
+ \ the user principal). Another downside is that the (unauthenticated) JWT principal\
+ \ depended on realm ordering, which makes identifying the JWT from its principal\
+ \ dependent on the ES authn realm configuration. This PR implements a consistent\
+ \ fixed logic to build the JWT principal, which only depends on the JWT's claims\
+ \ and no ES configuration."
+ impact: "Users will observe changed format and values for the `user.name` attribute\
+ \ of `authentication_failed` audit log events, in the JWT (failed) authn case."
+ notable: false
diff --git a/docs/changelog/101344.yaml b/docs/changelog/101344.yaml
deleted file mode 100644
index b546e743301f6..0000000000000
--- a/docs/changelog/101344.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101344
-summary: Register `repository_s3` settings
-area: Snapshot/Restore
-type: bug
-issues: []
diff --git a/docs/changelog/101358.yaml b/docs/changelog/101358.yaml
deleted file mode 100644
index 3ae2a44e15e5e..0000000000000
--- a/docs/changelog/101358.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101358
-summary: Make DISSECT parameter `append_separator` case insensitive
-area: ES|QL
-type: bug
-issues:
- - 101138
diff --git a/docs/changelog/101362.yaml b/docs/changelog/101362.yaml
deleted file mode 100644
index e1d763cd416fa..0000000000000
--- a/docs/changelog/101362.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101362
-summary: "ESQL: Remove the swapped-args check for date_xxx()"
-area: ES|QL
-type: enhancement
-issues:
- - 99562
diff --git a/docs/changelog/101438.yaml b/docs/changelog/101438.yaml
deleted file mode 100644
index 8189ee96b6576..0000000000000
--- a/docs/changelog/101438.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101438
-summary: "ESQL: Fix eval of functions on foldable literals"
-area: ES|QL
-type: bug
-issues:
- - 101425
diff --git a/docs/changelog/101456.yaml b/docs/changelog/101456.yaml
deleted file mode 100644
index db55dfbde1c64..0000000000000
--- a/docs/changelog/101456.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101456
-summary: "ESQL: adds Enrich implicit `match_fields` to `field_caps` call"
-area: ES|QL
-type: bug
-issues:
- - 101328
diff --git a/docs/changelog/101486.yaml b/docs/changelog/101486.yaml
deleted file mode 100644
index 99795feda328f..0000000000000
--- a/docs/changelog/101486.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101486
-summary: Improving tika handling
-area: Ingest Node
-type: bug
-issues: []
diff --git a/docs/changelog/101492.yaml b/docs/changelog/101492.yaml
deleted file mode 100644
index 2c3cdeee21bbb..0000000000000
--- a/docs/changelog/101492.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101492
-summary: "ESQL: check type before casting"
-area: ES|QL
-type: bug
-issues:
- - 101489
diff --git a/docs/changelog/101495.yaml b/docs/changelog/101495.yaml
deleted file mode 100644
index f61c9b824b77c..0000000000000
--- a/docs/changelog/101495.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101495
-summary: "[DSL] skip deleting indices that have in-progress downsampling operations"
-area: Data streams
-type: bug
-issues: []
diff --git a/docs/changelog/101497.yaml b/docs/changelog/101497.yaml
deleted file mode 100644
index 7909cb1ecdc0d..0000000000000
--- a/docs/changelog/101497.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101497
-summary: Fix snapshot double finalization
-area: Snapshot/Restore
-type: bug
-issues: []
diff --git a/docs/changelog/101516.yaml b/docs/changelog/101516.yaml
deleted file mode 100644
index a5445102c33c6..0000000000000
--- a/docs/changelog/101516.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101516
-summary: "Make settings dynamic"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/101577.yaml b/docs/changelog/101577.yaml
new file mode 100644
index 0000000000000..e485fd3811cb6
--- /dev/null
+++ b/docs/changelog/101577.yaml
@@ -0,0 +1,5 @@
+pr: 101577
+summary: Add metrics to the shared blob cache
+area: Search
+type: enhancement
+issues: []
diff --git a/docs/changelog/101609.yaml b/docs/changelog/101609.yaml
new file mode 100644
index 0000000000000..27993574743d2
--- /dev/null
+++ b/docs/changelog/101609.yaml
@@ -0,0 +1,9 @@
+pr: 101609
+summary: >
+  Add a node feature join barrier. This prevents nodes from joining clusters that do not have
+  all the features already present in the cluster. This ensures that once a feature is supported
+  by all the nodes in a cluster, that feature will never become unsupported in the future.
+  This is the corresponding functionality for the version join barrier, but for features.
+area: "Cluster Coordination"
+type: feature
+issues: []
diff --git a/docs/changelog/101627.yaml b/docs/changelog/101627.yaml
deleted file mode 100644
index 07992efd8bb3c..0000000000000
--- a/docs/changelog/101627.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101627
-summary: Ignore `IndexNotFound` error when refreshing destination index
-area: Transform
-type: bug
-issues: []
diff --git a/docs/changelog/101629.yaml b/docs/changelog/101629.yaml
deleted file mode 100644
index 1b8691c9798ff..0000000000000
--- a/docs/changelog/101629.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101629
-summary: Health report infrastructure doesn't trip the circuit breakers
-area: Health
-type: bug
-issues: []
diff --git a/docs/changelog/101648.yaml b/docs/changelog/101648.yaml
deleted file mode 100644
index 48e01739aabc0..0000000000000
--- a/docs/changelog/101648.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101648
-summary: "ESQL: Fix unreleased block in topn"
-area: ES|QL
-type: bug
-issues:
- - 101588
diff --git a/docs/changelog/101652.yaml b/docs/changelog/101652.yaml
deleted file mode 100644
index 79e3167696aee..0000000000000
--- a/docs/changelog/101652.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101652
-summary: Fix race condition in `SnapshotsService`
-area: Snapshot/Restore
-type: bug
-issues: []
diff --git a/docs/changelog/101660.yaml b/docs/changelog/101660.yaml
new file mode 100644
index 0000000000000..cb3d3118d15a6
--- /dev/null
+++ b/docs/changelog/101660.yaml
@@ -0,0 +1,6 @@
+pr: 101660
+summary: Fall through malformed JWTs to subsequent realms in the chain
+area: Authentication
+type: bug
+issues:
+ - 101367
diff --git a/docs/changelog/101682.yaml b/docs/changelog/101682.yaml
new file mode 100644
index 0000000000000..e512006057581
--- /dev/null
+++ b/docs/changelog/101682.yaml
@@ -0,0 +1,5 @@
+pr: 101682
+summary: "Add manage_enrich cluster privilege to kibana_system role"
+area: Authentication
+type: enhancement
+issues: []
diff --git a/docs/changelog/101700.yaml b/docs/changelog/101700.yaml
new file mode 100644
index 0000000000000..08671360688a7
--- /dev/null
+++ b/docs/changelog/101700.yaml
@@ -0,0 +1,5 @@
+pr: 101700
+summary: Fix `lastUnsafeSegmentGenerationForGets` for realtime get
+area: Engine
+type: bug
+issues: []
diff --git a/docs/changelog/101713.yaml b/docs/changelog/101713.yaml
deleted file mode 100644
index c3addf9296584..0000000000000
--- a/docs/changelog/101713.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101713
-summary: Disable `weight_matches` when kNN query is present
-area: Highlighting
-type: bug
-issues: []
diff --git a/docs/changelog/101723.yaml b/docs/changelog/101723.yaml
new file mode 100644
index 0000000000000..146d164805f00
--- /dev/null
+++ b/docs/changelog/101723.yaml
@@ -0,0 +1,6 @@
+pr: 101723
+summary: Allowing non-dynamic index settings to be updated by automatically unassigning
+ shards
+area: Indices APIs
+type: enhancement
+issues: []
diff --git a/docs/changelog/101753.yaml b/docs/changelog/101753.yaml
new file mode 100644
index 0000000000000..7b64075998430
--- /dev/null
+++ b/docs/changelog/101753.yaml
@@ -0,0 +1,5 @@
+pr: 101753
+summary: Expose roles by default in cat allocation API
+area: CAT APIs
+type: enhancement
+issues: []
diff --git a/docs/changelog/101788.yaml b/docs/changelog/101788.yaml
new file mode 100644
index 0000000000000..b7cc1e20663e8
--- /dev/null
+++ b/docs/changelog/101788.yaml
@@ -0,0 +1,6 @@
+pr: 101788
+summary: "ESQL: Narrow catch in convert functions"
+area: ES|QL
+type: bug
+issues:
+ - 100820
diff --git a/docs/changelog/101802.yaml b/docs/changelog/101802.yaml
new file mode 100644
index 0000000000000..20e857c32f664
--- /dev/null
+++ b/docs/changelog/101802.yaml
@@ -0,0 +1,5 @@
+pr: 101802
+summary: Correctly logging watcher history write failures
+area: Watcher
+type: bug
+issues: []
diff --git a/docs/changelog/101815.yaml b/docs/changelog/101815.yaml
new file mode 100644
index 0000000000000..511e23beb68ef
--- /dev/null
+++ b/docs/changelog/101815.yaml
@@ -0,0 +1,5 @@
+pr: 101815
+summary: Run `TransportGetAliasesAction` on local node
+area: Indices APIs
+type: enhancement
+issues: []
diff --git a/docs/changelog/101826.yaml b/docs/changelog/101826.yaml
new file mode 100644
index 0000000000000..87f3f8df1b0c2
--- /dev/null
+++ b/docs/changelog/101826.yaml
@@ -0,0 +1,6 @@
+pr: 101826
+summary: Support keyed histograms
+area: Aggregations
+type: enhancement
+issues:
+ - 100242
diff --git a/docs/changelog/101846.yaml b/docs/changelog/101846.yaml
new file mode 100644
index 0000000000000..52dfff8801c62
--- /dev/null
+++ b/docs/changelog/101846.yaml
@@ -0,0 +1,5 @@
+pr: 101846
+summary: Set `ActiveProcessorCount` when `node.processors` is set
+area: Infra/CLI
+type: enhancement
+issues: []
diff --git a/docs/changelog/101847.yaml b/docs/changelog/101847.yaml
new file mode 100644
index 0000000000000..91922b9e23ed0
--- /dev/null
+++ b/docs/changelog/101847.yaml
@@ -0,0 +1,6 @@
+pr: 101847
+summary: Add an additional tiebreaker to RRF
+area: Ranking
+type: bug
+issues:
+ - 101232
diff --git a/docs/changelog/101859.yaml b/docs/changelog/101859.yaml
new file mode 100644
index 0000000000000..54f3fb12810ca
--- /dev/null
+++ b/docs/changelog/101859.yaml
@@ -0,0 +1,6 @@
+pr: 101859
+summary: Cover head/tail commands edge cases and data types coverage
+area: EQL
+type: bug
+issues:
+ - 101724
diff --git a/docs/changelog/101868.yaml b/docs/changelog/101868.yaml
new file mode 100644
index 0000000000000..d7cf650d25ed2
--- /dev/null
+++ b/docs/changelog/101868.yaml
@@ -0,0 +1,5 @@
+pr: 101868
+summary: Read scores from downloaded vocabulary for XLM Roberta tokenizers
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/101915.yaml b/docs/changelog/101915.yaml
new file mode 100644
index 0000000000000..aed7ca62021a5
--- /dev/null
+++ b/docs/changelog/101915.yaml
@@ -0,0 +1,5 @@
+pr: 101915
+summary: Add inference counts by model to the machine learning usage stats
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/101989.yaml b/docs/changelog/101989.yaml
new file mode 100644
index 0000000000000..d294d194bd4e8
--- /dev/null
+++ b/docs/changelog/101989.yaml
@@ -0,0 +1,5 @@
+pr: 101989
+summary: Add message field to `HealthPeriodicLogger` and `S3RequestRetryStats`
+area: Health
+type: enhancement
+issues: []
diff --git a/docs/changelog/102020.yaml b/docs/changelog/102020.yaml
new file mode 100644
index 0000000000000..7c74e9676d342
--- /dev/null
+++ b/docs/changelog/102020.yaml
@@ -0,0 +1,5 @@
+pr: 102020
+summary: Retrieve stacktrace events from a custom index
+area: Application
+type: enhancement
+issues: []
diff --git a/docs/changelog/102048.yaml b/docs/changelog/102048.yaml
new file mode 100644
index 0000000000000..54bc1d9eae52e
--- /dev/null
+++ b/docs/changelog/102048.yaml
@@ -0,0 +1,5 @@
+pr: 102048
+summary: "Repo analysis: verify empty register"
+area: Snapshot/Restore
+type: enhancement
+issues: []
diff --git a/docs/changelog/102051.yaml b/docs/changelog/102051.yaml
new file mode 100644
index 0000000000000..c3ca4a546928f
--- /dev/null
+++ b/docs/changelog/102051.yaml
@@ -0,0 +1,5 @@
+pr: 102051
+summary: "Repo analysis: allow configuration of register ops"
+area: Snapshot/Restore
+type: enhancement
+issues: []
diff --git a/docs/changelog/102056.yaml b/docs/changelog/102056.yaml
new file mode 100644
index 0000000000000..455f66ba90b03
--- /dev/null
+++ b/docs/changelog/102056.yaml
@@ -0,0 +1,5 @@
+pr: 102056
+summary: Use `BulkRequest` to store Application Privileges
+area: Authorization
+type: enhancement
+issues: []
diff --git a/docs/changelog/102057.yaml b/docs/changelog/102057.yaml
new file mode 100644
index 0000000000000..d5b664ba14c29
--- /dev/null
+++ b/docs/changelog/102057.yaml
@@ -0,0 +1,6 @@
+pr: 102057
+summary: Simplify `BlobStoreRepository` idle check
+area: Snapshot/Restore
+type: bug
+issues:
+ - 101948
diff --git a/docs/changelog/102065.yaml b/docs/changelog/102065.yaml
new file mode 100644
index 0000000000000..1a9a219df4502
--- /dev/null
+++ b/docs/changelog/102065.yaml
@@ -0,0 +1,5 @@
+pr: 102065
+summary: Add more desired balance stats
+area: Allocation
+type: enhancement
+issues: []
diff --git a/docs/changelog/102075.yaml b/docs/changelog/102075.yaml
new file mode 100644
index 0000000000000..54daae04169db
--- /dev/null
+++ b/docs/changelog/102075.yaml
@@ -0,0 +1,5 @@
+pr: 102075
+summary: Accept a single or multiple inputs to `_inference`
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/102089.yaml b/docs/changelog/102089.yaml
new file mode 100644
index 0000000000000..9f33c0648d09f
--- /dev/null
+++ b/docs/changelog/102089.yaml
@@ -0,0 +1,5 @@
+pr: 102089
+summary: Add prefix strings option to trained models
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/102114.yaml b/docs/changelog/102114.yaml
new file mode 100644
index 0000000000000..a08389da0351b
--- /dev/null
+++ b/docs/changelog/102114.yaml
@@ -0,0 +1,6 @@
+pr: 102114
+summary: Fix double-completion in `SecurityUsageTransportAction`
+area: Security
+type: bug
+issues:
+ - 102111
diff --git a/docs/changelog/102140.yaml b/docs/changelog/102140.yaml
new file mode 100644
index 0000000000000..0f086649b9710
--- /dev/null
+++ b/docs/changelog/102140.yaml
@@ -0,0 +1,6 @@
+pr: 102140
+summary: Collect data tiers usage stats more efficiently
+area: ILM+SLM
+type: bug
+issues:
+ - 100230
\ No newline at end of file
diff --git a/docs/changelog/102151.yaml b/docs/changelog/102151.yaml
new file mode 100644
index 0000000000000..652ae555af97d
--- /dev/null
+++ b/docs/changelog/102151.yaml
@@ -0,0 +1,5 @@
+pr: 102151
+summary: Default `run_ml_inference` should be true
+area: Application
+type: bug
+issues: []
diff --git a/docs/changelog/102172.yaml b/docs/changelog/102172.yaml
new file mode 100644
index 0000000000000..485c2c4327e11
--- /dev/null
+++ b/docs/changelog/102172.yaml
@@ -0,0 +1,5 @@
+pr: 102172
+summary: Adjust Histogram's bucket accounting to be iterative
+area: Aggregations
+type: bug
+issues: []
diff --git a/docs/changelog/102188.yaml b/docs/changelog/102188.yaml
new file mode 100644
index 0000000000000..595a8395fab5c
--- /dev/null
+++ b/docs/changelog/102188.yaml
@@ -0,0 +1,5 @@
+pr: 102188
+summary: Track blocks in `AsyncOperator`
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/102190.yaml b/docs/changelog/102190.yaml
new file mode 100644
index 0000000000000..cd04e041fca5e
--- /dev/null
+++ b/docs/changelog/102190.yaml
@@ -0,0 +1,5 @@
+pr: 102190
+summary: Track pages in ESQL enrich request/response
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/102208.yaml b/docs/changelog/102208.yaml
new file mode 100644
index 0000000000000..b566a85753d82
--- /dev/null
+++ b/docs/changelog/102208.yaml
@@ -0,0 +1,5 @@
+pr: 102208
+summary: Add static node settings to set default values for max merged segment sizes
+area: Engine
+type: enhancement
+issues: []
diff --git a/docs/changelog/94607.yaml b/docs/changelog/94607.yaml
deleted file mode 100644
index eea9264ce90f9..0000000000000
--- a/docs/changelog/94607.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-pr: 94607
-summary: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers
-area: Engine
-type: enhancement
-issues: []
-highlight:
- title: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers
- body: |-
- Rather than forcing a refresh to reclaim memory from indexing buffers, which flushes all
- segments no matter how large, Elasticsearch now takes advantage of
- `IndexWriter#flushNextBuffer` which only flushes the largest pending segment. This should smooth
- out indexing allowing for larger segment sizes, fewer merges and higher throughput.
-
- Furthermore, the selection algorithm to pick which shard to reclaim memory from next was
- changed, from picking the shard that uses the most RAM to going over shards in a round-robin
- fashion. This approach has proved to work significantly better in practice.
-
- notable: true
diff --git a/docs/changelog/97317.yaml b/docs/changelog/97317.yaml
deleted file mode 100644
index 64fcd55e67e28..0000000000000
--- a/docs/changelog/97317.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 97317
-summary: "Fix merges of mappings with `subobjects: false` for composable index templates"
-area: Mapping
-type: bug
-issues:
- - 96768
diff --git a/docs/changelog/97397.yaml b/docs/changelog/97397.yaml
deleted file mode 100644
index 5c1867d55f9bd..0000000000000
--- a/docs/changelog/97397.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97397
-summary: Return a 410 (Gone) status code for unavailable API endpoints
-area: Infra/REST API
-type: enhancement
-issues: []
diff --git a/docs/changelog/97409.yaml b/docs/changelog/97409.yaml
deleted file mode 100644
index 8c05d6254f7cc..0000000000000
--- a/docs/changelog/97409.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97409
-summary: Trim stored fields for `_id` field in tsdb
-area: TSDB
-type: enhancement
-issues: []
diff --git a/docs/changelog/97450.yaml b/docs/changelog/97450.yaml
deleted file mode 100644
index a057e0beefaca..0000000000000
--- a/docs/changelog/97450.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97450
-summary: Make `_index` optional for pinned query docs
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/97642.yaml b/docs/changelog/97642.yaml
deleted file mode 100644
index cf519e04e2d38..0000000000000
--- a/docs/changelog/97642.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97642
-summary: fix fuzzy query rewrite parameter not work
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/97729.yaml b/docs/changelog/97729.yaml
deleted file mode 100644
index f80a04bc58f68..0000000000000
--- a/docs/changelog/97729.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97729
-summary: Allow parsing on non-string routing fields
-area: Aggregations
-type: bug
-issues: []
diff --git a/docs/changelog/97972.yaml b/docs/changelog/97972.yaml
deleted file mode 100644
index d4d55e33b4bb2..0000000000000
--- a/docs/changelog/97972.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 97972
-summary: Automatically flatten objects when subobjects:false
-area: Mapping
-type: enhancement
-issues:
- - 88934
diff --git a/docs/changelog/98038.yaml b/docs/changelog/98038.yaml
deleted file mode 100644
index d99db24664f30..0000000000000
--- a/docs/changelog/98038.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98038
-summary: Update enrich execution to only set index false on fields that support it
-area: Ingest Node
-type: bug
-issues:
- - 98019
diff --git a/docs/changelog/98061.yaml b/docs/changelog/98061.yaml
deleted file mode 100644
index 3955b262017f0..0000000000000
--- a/docs/changelog/98061.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98061
-summary: Fix possible NPE when getting transform stats for failed transforms
-area: Transform
-type: bug
-issues:
- - 98052
diff --git a/docs/changelog/98268.yaml b/docs/changelog/98268.yaml
deleted file mode 100644
index ef6f98b8d016c..0000000000000
--- a/docs/changelog/98268.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98268
-summary: Dense vector field types are indexed by default
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/98309.yaml b/docs/changelog/98309.yaml
deleted file mode 100644
index 550f50b3569a1..0000000000000
--- a/docs/changelog/98309.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98309
-summary: "Integrate Elasticsearch Query Language, ES|QL"
-area: Query Languages
-type: feature
-issues: []
diff --git a/docs/changelog/98332.yaml b/docs/changelog/98332.yaml
deleted file mode 100644
index 6446707515b3c..0000000000000
--- a/docs/changelog/98332.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98332
-summary: Correct behaviour of `ContentPath::remove()`
-area: Mapping
-type: bug
-issues:
- - 98327
diff --git a/docs/changelog/98337.yaml b/docs/changelog/98337.yaml
deleted file mode 100644
index 8664ae15eed00..0000000000000
--- a/docs/changelog/98337.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98337
-summary: TopN sorting with min and max for multi-value fields
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98360.yaml b/docs/changelog/98360.yaml
deleted file mode 100644
index b6b8696259c98..0000000000000
--- a/docs/changelog/98360.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98360
-summary: Use a competitive iterator in `FiltersAggregator`
-area: Aggregations
-type: enhancement
-issues:
- - 97544
diff --git a/docs/changelog/98406.yaml b/docs/changelog/98406.yaml
deleted file mode 100644
index f62af64171944..0000000000000
--- a/docs/changelog/98406.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98406
-summary: Safely drain deployment request queues before allowing node to shutdown
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/98457.yaml b/docs/changelog/98457.yaml
deleted file mode 100644
index 465c9ed30cc5b..0000000000000
--- a/docs/changelog/98457.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98457
-summary: Support cluster/details for CCS minimize_roundtrips=false
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/98470.yaml b/docs/changelog/98470.yaml
deleted file mode 100644
index 498b1db244d22..0000000000000
--- a/docs/changelog/98470.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98470
-summary: Reduce verbosity of the bulk indexing audit log
-area: Audit
-type: enhancement
-issues: []
diff --git a/docs/changelog/98512.yaml b/docs/changelog/98512.yaml
deleted file mode 100644
index c2108a18c6b91..0000000000000
--- a/docs/changelog/98512.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98512
-summary: Automatically map float arrays of lengths 128 - 2048 as dense_vector
-area: Application
-type: feature
-issues:
- - 97532
diff --git a/docs/changelog/98518.yaml b/docs/changelog/98518.yaml
deleted file mode 100644
index 2f961fc11ce69..0000000000000
--- a/docs/changelog/98518.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98518
-summary: Add `index.look_back_time` setting for tsdb data streams
-area: TSDB
-type: enhancement
-issues:
- - 98463
diff --git a/docs/changelog/98528.yaml b/docs/changelog/98528.yaml
deleted file mode 100644
index 0004499e58f83..0000000000000
--- a/docs/changelog/98528.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98528
-summary: "ESQL: Add support for TEXT fields in comparison operators and SORT"
-area: ES|QL
-type: enhancement
-issues:
- - 98642
diff --git a/docs/changelog/98550.yaml b/docs/changelog/98550.yaml
deleted file mode 100644
index 30c9891b15182..0000000000000
--- a/docs/changelog/98550.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98550
-summary: Report a node's "roles" setting in the /_cluster/allocation/explain response
-area: Allocation
-type: enhancement
-issues: [97859]
diff --git a/docs/changelog/98574.yaml b/docs/changelog/98574.yaml
deleted file mode 100644
index bf016b4c241c8..0000000000000
--- a/docs/changelog/98574.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98574
-summary: Specify correct current `IndexVersion` after 8.10 release
-area: Infra/Core
-type: bug
-issues:
- - 98555
diff --git a/docs/changelog/98590.yaml b/docs/changelog/98590.yaml
deleted file mode 100644
index f3ef3cdd56a12..0000000000000
--- a/docs/changelog/98590.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98590
-summary: "ESQL: LTRIM, RTRIM and fix unicode whitespace"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98622.yaml b/docs/changelog/98622.yaml
deleted file mode 100644
index 8c41444b6c725..0000000000000
--- a/docs/changelog/98622.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98622
-summary: Add 'dataset' size to cat indices and cat shards
-area: CAT APIs
-type: enhancement
-issues:
- - 95092
diff --git a/docs/changelog/98628.yaml b/docs/changelog/98628.yaml
deleted file mode 100644
index 2ecd9dd23e0ef..0000000000000
--- a/docs/changelog/98628.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98628
-summary: Add ESQL own flavor of arithmetic operators
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/98630.yaml b/docs/changelog/98630.yaml
deleted file mode 100644
index 444c593f87d0b..0000000000000
--- a/docs/changelog/98630.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98630
-summary: "ESQL: LEAST and GREATEST functions"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98635.yaml b/docs/changelog/98635.yaml
deleted file mode 100644
index 274096951fcf6..0000000000000
--- a/docs/changelog/98635.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98635
-summary: Fix NPE in `StableMasterHealthIndicatorService`
-area: Health
-type: bug
-issues: []
diff --git a/docs/changelog/98653.yaml b/docs/changelog/98653.yaml
deleted file mode 100644
index 384a29c3cc4ab..0000000000000
--- a/docs/changelog/98653.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98653
-summary: Reset `GatewayService` flags before reroute
-area: Cluster Coordination
-type: bug
-issues:
- - 98606
diff --git a/docs/changelog/98654.yaml b/docs/changelog/98654.yaml
deleted file mode 100644
index ea63edb93eb58..0000000000000
--- a/docs/changelog/98654.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98654
-summary: Allow native users/roles to be disabled via setting
-area: Authentication
-type: enhancement
-issues: []
diff --git a/docs/changelog/98684.yaml b/docs/changelog/98684.yaml
deleted file mode 100644
index 552e85a04151a..0000000000000
--- a/docs/changelog/98684.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98684
-summary: Explicit parsing object capabilities of `FieldMappers`
-area: Mapping
-type: enhancement
-issues:
- - 98537
diff --git a/docs/changelog/98711.yaml b/docs/changelog/98711.yaml
deleted file mode 100644
index 43e0c2a03e8fa..0000000000000
--- a/docs/changelog/98711.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98711
-summary: Support unsigned long in sqrt and log10 for ESQL
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/98759.yaml b/docs/changelog/98759.yaml
deleted file mode 100644
index df6180bddc192..0000000000000
--- a/docs/changelog/98759.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98759
-summary: "ESQL: Support queries that don't return underlying fields"
-area: ES|QL
-type: bug
-issues:
- - 98404
diff --git a/docs/changelog/98809.yaml b/docs/changelog/98809.yaml
deleted file mode 100644
index f9f5be523e179..0000000000000
--- a/docs/changelog/98809.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 98809
-summary: Avoiding the use of nodes that are no longer in the cluster when computing
- master stability
-area: Health
-type: enhancement
-issues:
- - 98636
diff --git a/docs/changelog/98811.yaml b/docs/changelog/98811.yaml
deleted file mode 100644
index 338efbcf1d8c9..0000000000000
--- a/docs/changelog/98811.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98811
-summary: Allow explain data stream lifecycle to accept a data stream
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/98824.yaml b/docs/changelog/98824.yaml
deleted file mode 100644
index 7e2c43d266232..0000000000000
--- a/docs/changelog/98824.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98824
-summary: Consider node shutdown in `DataTierAllocationDecider`
-area: "Allocation"
-type: bug
-issues:
- - 97207
diff --git a/docs/changelog/98840.yaml b/docs/changelog/98840.yaml
deleted file mode 100644
index bb358916354dc..0000000000000
--- a/docs/changelog/98840.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98840
-summary: Don't ignore empty index template that have no template definition
-area: TSDB
-type: bug
-issues:
- - 98834
diff --git a/docs/changelog/98843.yaml b/docs/changelog/98843.yaml
deleted file mode 100644
index 742ae25697718..0000000000000
--- a/docs/changelog/98843.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98843
-summary: Fix UnsignedLong field range query gt "0" can get the result equal to 0
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/98844.yaml b/docs/changelog/98844.yaml
deleted file mode 100644
index a5870e7344d15..0000000000000
--- a/docs/changelog/98844.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98844
-summary: Add accessors required to recreate `TransformStats` object from the fields
-area: Transform
-type: enhancement
-issues: []
diff --git a/docs/changelog/98847.yaml b/docs/changelog/98847.yaml
deleted file mode 100644
index ab7455bd783c3..0000000000000
--- a/docs/changelog/98847.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98847
-summary: "ESQL: Add `CEIL` function"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/98870.yaml b/docs/changelog/98870.yaml
deleted file mode 100644
index b719fbb0caf22..0000000000000
--- a/docs/changelog/98870.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98870
-summary: "ESQL: Add ability to perform date math"
-area: ES|QL
-type: enhancement
-issues:
- - 98402
diff --git a/docs/changelog/98874.yaml b/docs/changelog/98874.yaml
new file mode 100644
index 0000000000000..e3eb7b5acc63f
--- /dev/null
+++ b/docs/changelog/98874.yaml
@@ -0,0 +1,5 @@
+pr: 98874
+summary: Estimate the memory required to deploy trained models more accurately
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/98878.yaml b/docs/changelog/98878.yaml
deleted file mode 100644
index 4fa8b23851bf9..0000000000000
--- a/docs/changelog/98878.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98878
-summary: Fix percolator query for stored queries that expand on wildcard field names
-area: Percolator
-type: bug
-issues: []
diff --git a/docs/changelog/98888.yaml b/docs/changelog/98888.yaml
deleted file mode 100644
index 1f2f7ea27ff19..0000000000000
--- a/docs/changelog/98888.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98888
-summary: Revert "Kibana system index does not allow user templates to affect it"
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/98915.yaml b/docs/changelog/98915.yaml
deleted file mode 100644
index c23ddcc55d98e..0000000000000
--- a/docs/changelog/98915.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98915
-summary: Avoid risk of OOM in datafeeds when memory is constrained
-area: Machine Learning
-type: bug
-issues: [89769]
diff --git a/docs/changelog/98930.yaml b/docs/changelog/98930.yaml
deleted file mode 100644
index e6a2c74192ebe..0000000000000
--- a/docs/changelog/98930.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98930
-summary: Frozen index input clone copy cache file
-area: Snapshot/Restore
-type: bug
-issues: []
diff --git a/docs/changelog/98942.yaml b/docs/changelog/98942.yaml
deleted file mode 100644
index 4d8eeee5192e5..0000000000000
--- a/docs/changelog/98942.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98942
-summary: "ESQL: LEFT function"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98972.yaml b/docs/changelog/98972.yaml
deleted file mode 100644
index acd336ff7d666..0000000000000
--- a/docs/changelog/98972.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98972
-summary: "ES|QL: Implement serialization of `InvalidMappedField`"
-area: ES|QL
-type: bug
-issues:
- - 98851
diff --git a/docs/changelog/98974.yaml b/docs/changelog/98974.yaml
deleted file mode 100644
index 90950986141ab..0000000000000
--- a/docs/changelog/98974.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98974
-summary: "ESQL: RIGHT function"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98996.yaml b/docs/changelog/98996.yaml
deleted file mode 100644
index 1f1bdd35ff643..0000000000000
--- a/docs/changelog/98996.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98996
-summary: Reintroduce `sparse_vector` mapping
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/99054.yaml b/docs/changelog/99054.yaml
deleted file mode 100644
index a9e4128e7ae97..0000000000000
--- a/docs/changelog/99054.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99054
-summary: "ESQL: Mark counter fields as unsupported"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99058.yaml b/docs/changelog/99058.yaml
deleted file mode 100644
index a112834add071..0000000000000
--- a/docs/changelog/99058.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99058
-summary: "ESQL: log query and execution time"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99091.yaml b/docs/changelog/99091.yaml
deleted file mode 100644
index 2c7be19b161ba..0000000000000
--- a/docs/changelog/99091.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99091
-summary: Add flamegraph API
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/99106.yaml b/docs/changelog/99106.yaml
deleted file mode 100644
index 21cb121595d2b..0000000000000
--- a/docs/changelog/99106.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99106
-summary: "Add support for Persian language stemmer"
-area: Analysis
-type: feature
-issues:
- - 98911
diff --git a/docs/changelog/99107.yaml b/docs/changelog/99107.yaml
deleted file mode 100644
index a808fb57fcf80..0000000000000
--- a/docs/changelog/99107.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99107
-summary: Wait to gracefully stop deployments until alternative allocation exists
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/99117.yaml b/docs/changelog/99117.yaml
deleted file mode 100644
index 491692f232081..0000000000000
--- a/docs/changelog/99117.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99117
-summary: Do not report failure after connections are made
-area: Network
-type: bug
-issues: []
diff --git a/docs/changelog/99163.yaml b/docs/changelog/99163.yaml
deleted file mode 100644
index f7a44c7f24869..0000000000000
--- a/docs/changelog/99163.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99163
-summary: Use `NamedWritable` to enable `GeoBoundingBox` serialisation
-area: Geo
-type: bug
-issues:
- - 99089
diff --git a/docs/changelog/99188.yaml b/docs/changelog/99188.yaml
deleted file mode 100644
index c22e3ba4b36e5..0000000000000
--- a/docs/changelog/99188.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99188
-summary: "ESQL: skip synthetic attributes when planning the physical fragment"
-area: ES|QL
-type: bug
-issues:
- - 99170
diff --git a/docs/changelog/99193.yaml b/docs/changelog/99193.yaml
deleted file mode 100644
index 9db646dc80435..0000000000000
--- a/docs/changelog/99193.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99193
-summary: Wait for cluster state in recovery
-area: Recovery
-type: enhancement
-issues: []
diff --git a/docs/changelog/99215.yaml b/docs/changelog/99215.yaml
deleted file mode 100644
index 99227839b491e..0000000000000
--- a/docs/changelog/99215.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99215
-summary: Skip `DisiPriorityQueue` on single filter agg
-area: Aggregations
-type: enhancement
-issues:
- - 99202
diff --git a/docs/changelog/99219.yaml b/docs/changelog/99219.yaml
deleted file mode 100644
index 811e2df5f83d0..0000000000000
--- a/docs/changelog/99219.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99219
-summary: Reduce copying when creating scroll/PIT ids
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/99222.yaml b/docs/changelog/99222.yaml
deleted file mode 100644
index 025c5e01d2a53..0000000000000
--- a/docs/changelog/99222.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99222
-summary: Fork response-sending in `OpenPointInTimeAction`
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/99223.yaml b/docs/changelog/99223.yaml
deleted file mode 100644
index 914441931033b..0000000000000
--- a/docs/changelog/99223.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-pr: 99223
-summary: Remove `transport_versions` from cluster state API
-area: Infra/Core
-type: breaking
-issues: []
-breaking:
- title: Remove `transport_versions` from cluster state API
- area: REST API
- details: The `transport_versions` subobject of the response to `GET _cluster/state` has been replaced by the `nodes_versions` subobject.
- impact: If needed, retrieve the per-node transport versions from the `nodes_versions` subobject.
- notable: false
diff --git a/docs/changelog/99224.yaml b/docs/changelog/99224.yaml
deleted file mode 100644
index cde4084ab0e84..0000000000000
--- a/docs/changelog/99224.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99224
-summary: Add new _inference API
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/99278.yaml b/docs/changelog/99278.yaml
deleted file mode 100644
index f2788a00e6369..0000000000000
--- a/docs/changelog/99278.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99278
-summary: Support rotatating the JWT shared secret
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/99286.yaml b/docs/changelog/99286.yaml
deleted file mode 100644
index 1b37416d51ba6..0000000000000
--- a/docs/changelog/99286.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99286
-summary: "ESQL: Log execution time consistently"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99300.yaml b/docs/changelog/99300.yaml
deleted file mode 100644
index 508001b98f29e..0000000000000
--- a/docs/changelog/99300.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99300
-summary: Change `GetFromTranslog` to indices action
-area: CRUD
-type: bug
-issues: []
diff --git a/docs/changelog/99303.yaml b/docs/changelog/99303.yaml
deleted file mode 100644
index 479c3a3e280c7..0000000000000
--- a/docs/changelog/99303.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99303
-summary: Use DEBUG log level to report ESQL execution steps
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99310.yaml b/docs/changelog/99310.yaml
deleted file mode 100644
index 8b595fe93fd33..0000000000000
--- a/docs/changelog/99310.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99310
-summary: "ESQL: \"params\" correctly parses the values including an optional \"type\""
-area: ES|QL
-type: bug
-issues:
- - 99294
diff --git a/docs/changelog/99316.yaml b/docs/changelog/99316.yaml
deleted file mode 100644
index 78857b433b385..0000000000000
--- a/docs/changelog/99316.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99316
-summary: "ESQL: Compact topn"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99346.yaml b/docs/changelog/99346.yaml
deleted file mode 100644
index fc6fe02e6bf14..0000000000000
--- a/docs/changelog/99346.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99346
-summary: Automatically disable `ignore_malformed` on datastream `@timestamp` fields
-area: Mapping
-type: bug
-issues: []
diff --git a/docs/changelog/99382.yaml b/docs/changelog/99382.yaml
deleted file mode 100644
index 5f5eb932ed458..0000000000000
--- a/docs/changelog/99382.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99382
-summary: "ESQL: create a Vector when needed for IN"
-area: ES|QL
-type: bug
-issues:
- - 99347
diff --git a/docs/changelog/99417.yaml b/docs/changelog/99417.yaml
deleted file mode 100644
index 8c88a5a548dff..0000000000000
--- a/docs/changelog/99417.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99417
-summary: Disable `FilterByFilterAggregator` through `ClusterSettings`
-area: Aggregations
-type: enhancement
-issues:
- - 99335
diff --git a/docs/changelog/99432.yaml b/docs/changelog/99432.yaml
deleted file mode 100644
index df4c5a7f78199..0000000000000
--- a/docs/changelog/99432.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99432
-summary: "ESQL: Enable arithmetics for durations and periods"
-area: ES|QL
-type: enhancement
-issues: [99293]
diff --git a/docs/changelog/99470.yaml b/docs/changelog/99470.yaml
deleted file mode 100644
index 3e784595cc6ac..0000000000000
--- a/docs/changelog/99470.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99470
-summary: "ESQL: Improve log messages"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99474.yaml b/docs/changelog/99474.yaml
deleted file mode 100644
index ea23481069833..0000000000000
--- a/docs/changelog/99474.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99474
-summary: Add `java.net.NetPermission` to APM module's permissions
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/99515.yaml b/docs/changelog/99515.yaml
deleted file mode 100644
index 7de237531a506..0000000000000
--- a/docs/changelog/99515.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99515
-summary: Add `IndexVersion` to node info
-area: Infra/REST API
-type: enhancement
-issues: []
diff --git a/docs/changelog/99527.yaml b/docs/changelog/99527.yaml
deleted file mode 100644
index 19eef621fa500..0000000000000
--- a/docs/changelog/99527.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99445
-summary: Add new max_inner_product vector similarity function
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/99532.yaml b/docs/changelog/99532.yaml
deleted file mode 100644
index 859ba963600a8..0000000000000
--- a/docs/changelog/99532.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99532
-summary: Adds `nested` support for indexed `dense_vector` fields
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/99555.yaml b/docs/changelog/99555.yaml
deleted file mode 100644
index 5e53e8782e08c..0000000000000
--- a/docs/changelog/99555.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99555
-summary: Use mappings version to retrieve system index mappings at creation time
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/99566.yaml b/docs/changelog/99566.yaml
deleted file mode 100644
index caad871bf58ed..0000000000000
--- a/docs/changelog/99566.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99566
-summary: Add additional counters to `_clusters` response for all Cluster search states
-area: Search
-type: enhancement
-issues:
- - 98927
diff --git a/docs/changelog/99567.yaml b/docs/changelog/99567.yaml
deleted file mode 100644
index aea65e55b6ee2..0000000000000
--- a/docs/changelog/99567.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99567
-summary: Make tsdb settings public in Serverless
-area: TSDB
-type: bug
-issues:
- - 99563
diff --git a/docs/changelog/99584.yaml b/docs/changelog/99584.yaml
deleted file mode 100644
index 229e3d8024506..0000000000000
--- a/docs/changelog/99584.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99584
-summary: Adding an option for trained models to be platform specific
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/99588.yaml b/docs/changelog/99588.yaml
deleted file mode 100644
index 7cbb53376fdf0..0000000000000
--- a/docs/changelog/99588.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99588
-summary: Make ESQL more resilient to non-indexed fields
-area: ES|QL
-type: bug
-issues:
- - 99506
diff --git a/docs/changelog/99601.yaml b/docs/changelog/99601.yaml
deleted file mode 100644
index 9deba859a5cef..0000000000000
--- a/docs/changelog/99601.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99601
-summary: "ESQL: continue resolving attributes for Eval"
-area: ES|QL
-type: bug
-issues:
- - 99576
diff --git a/docs/changelog/99627.yaml b/docs/changelog/99627.yaml
deleted file mode 100644
index 84abdf6418dc2..0000000000000
--- a/docs/changelog/99627.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99627
-summary: Fix thread context in `getRepositoryData`
-area: Snapshot/Restore
-type: bug
-issues: []
diff --git a/docs/changelog/99631.yaml b/docs/changelog/99631.yaml
deleted file mode 100644
index d9174de76f1ea..0000000000000
--- a/docs/changelog/99631.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99631
-summary: Add component info versions to node info in a pluggable way
-area: Infra/REST API
-type: enhancement
-issues: []
diff --git a/docs/changelog/99641.yaml b/docs/changelog/99641.yaml
deleted file mode 100644
index c74f7380bd93a..0000000000000
--- a/docs/changelog/99641.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99641
-summary: Chunk the cluster allocation explain response
-area: Network
-type: enhancement
-issues: [97803]
diff --git a/docs/changelog/99644.yaml b/docs/changelog/99644.yaml
deleted file mode 100644
index 10c10448c074c..0000000000000
--- a/docs/changelog/99644.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99644
-summary: Add links to docs from failing bootstrap checks
-area: Infra/Node Lifecycle
-type: enhancement
-issues: [99614]
-
diff --git a/docs/changelog/99655.yaml b/docs/changelog/99655.yaml
deleted file mode 100644
index 3d1e76ec47aa3..0000000000000
--- a/docs/changelog/99655.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99655
-summary: "[Profiling] Allow to wait until resources created"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/99682.yaml b/docs/changelog/99682.yaml
deleted file mode 100644
index 48e99a5145674..0000000000000
--- a/docs/changelog/99682.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99682
-summary: Increase the max vector dims to 4096
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/99685.yaml b/docs/changelog/99685.yaml
deleted file mode 100644
index 43dac2abbb312..0000000000000
--- a/docs/changelog/99685.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99685
-summary: Fix `advanceExact` for doc values from sources
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/99694.yaml b/docs/changelog/99694.yaml
deleted file mode 100644
index a449ecb2ae378..0000000000000
--- a/docs/changelog/99694.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99694
-summary: Remove shard data files when they fail to write for snapshot
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/99695.yaml b/docs/changelog/99695.yaml
deleted file mode 100644
index 6dc4037a57763..0000000000000
--- a/docs/changelog/99695.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99695
-summary: "ESQL: Better management of not stored TEXT fiels with synthetic source"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/99711.yaml b/docs/changelog/99711.yaml
deleted file mode 100644
index 34731a52818f0..0000000000000
--- a/docs/changelog/99711.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99711
-summary: "ESQL: Date math for negatives"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99712.yaml b/docs/changelog/99712.yaml
deleted file mode 100644
index c5fa1ac1e64ec..0000000000000
--- a/docs/changelog/99712.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99712
-summary: Make downsample target index replicas configurable
-area: Downsampling
-type: bug
-issues: []
diff --git a/docs/changelog/99717.yaml b/docs/changelog/99717.yaml
deleted file mode 100644
index db48c69ed68a2..0000000000000
--- a/docs/changelog/99717.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99717
-summary: Treating watcher webhook response header names as case-insensitive
-area: Watcher
-type: bug
-issues: []
diff --git a/docs/changelog/99726.yaml b/docs/changelog/99726.yaml
deleted file mode 100644
index 23350fdb85bd0..0000000000000
--- a/docs/changelog/99726.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99726
-summary: "ESQL: Account for an exception being thrown when building a `BytesRefArrayBlock`"
-area: ES|QL
-type: bug
-issues:
- - 99472
diff --git a/docs/changelog/99736.yaml b/docs/changelog/99736.yaml
deleted file mode 100644
index fbf177ea152a8..0000000000000
--- a/docs/changelog/99736.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99736
-summary: "ESQL: enhance SHOW FUNCTIONS command"
-area: ES|QL
-type: enhancement
-issues:
- - 99507
diff --git a/docs/changelog/99746.yaml b/docs/changelog/99746.yaml
deleted file mode 100644
index c4cdbc00f82c1..0000000000000
--- a/docs/changelog/99746.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99746
-summary: "ESQL: Log start and end of queries"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99775.yaml b/docs/changelog/99775.yaml
deleted file mode 100644
index 0c0dbdb1fce87..0000000000000
--- a/docs/changelog/99775.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99775
-summary: Adding support for exist queries to `sparse_vector` fields
-area: Search
-type: enhancement
-issues:
- - 99319
diff --git a/docs/changelog/99796.yaml b/docs/changelog/99796.yaml
deleted file mode 100644
index cad10564ed294..0000000000000
--- a/docs/changelog/99796.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99796
-summary: Support runtime fields in synthetic source
-area: Aggregations
-type: bug
-issues:
- - 98287
diff --git a/docs/changelog/99797.yaml b/docs/changelog/99797.yaml
deleted file mode 100644
index e46d4501291b5..0000000000000
--- a/docs/changelog/99797.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99797
-summary: Wait for cluster to recover before resolving index template
-area: CRUD
-type: bug
-issues: []
diff --git a/docs/changelog/99798.yaml b/docs/changelog/99798.yaml
deleted file mode 100644
index bd8b9da71541d..0000000000000
--- a/docs/changelog/99798.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 99798
-summary: Chunk `SingleNodeShutdownStatus` and `ShutdownShardMigrationStatus` (and
- related action) response
-area: Infra/Node Lifecycle
-type: enhancement
-issues:
- - 99678
diff --git a/docs/changelog/99804.yaml b/docs/changelog/99804.yaml
deleted file mode 100644
index b4c226217e352..0000000000000
--- a/docs/changelog/99804.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99804
-summary: Correctly handle `ScriptScoreQuery` in plain highlighter
-area: Highlighting
-type: bug
-issues:
- - 99700
diff --git a/docs/changelog/99816.yaml b/docs/changelog/99816.yaml
deleted file mode 100644
index 4caf8a36f54b4..0000000000000
--- a/docs/changelog/99816.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99816
-summary: "ESQL: Lower the implicit limit, if none is user-provided"
-area: ES|QL
-type: enhancement
-issues:
- - 99458
diff --git a/docs/changelog/99827.yaml b/docs/changelog/99827.yaml
deleted file mode 100644
index 3e6690a8e9e68..0000000000000
--- a/docs/changelog/99827.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99827
-summary: "ESQL: Fix NPE when aggregating literals"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/99832.yaml b/docs/changelog/99832.yaml
deleted file mode 100644
index 9bd83591ba920..0000000000000
--- a/docs/changelog/99832.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99832
-summary: APM Metering API
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/99873.yaml b/docs/changelog/99873.yaml
deleted file mode 100644
index d726ba00a1558..0000000000000
--- a/docs/changelog/99873.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99873
-summary: "[Profiling] Tighten resource creation check"
-area: Application
-type: bug
-issues: []
diff --git a/docs/changelog/99874.yaml b/docs/changelog/99874.yaml
deleted file mode 100644
index d23fc1ea6edde..0000000000000
--- a/docs/changelog/99874.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99874
-summary: "ESQL: Use exact attributes for data source extraction"
-area: ES|QL
-type: bug
-issues:
- - 99183
diff --git a/docs/changelog/99909.yaml b/docs/changelog/99909.yaml
deleted file mode 100644
index 2051a30e4efa1..0000000000000
--- a/docs/changelog/99909.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99909
-summary: "[Profiling] Allow to customize the ILM policy"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/99912.yaml b/docs/changelog/99912.yaml
deleted file mode 100644
index 06f0f9baa9661..0000000000000
--- a/docs/changelog/99912.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99912
-summary: Represent histogram value count as long
-area: Aggregations
-type: enhancement
-issues:
- - 99820
diff --git a/docs/changelog/99938.yaml b/docs/changelog/99938.yaml
deleted file mode 100644
index 4349b73516cae..0000000000000
--- a/docs/changelog/99938.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99938
-summary: "Prune unnecessary information from TransportNodesInfoAction.NodeInfoRequest"
-area: Stats
-type: enhancement
-issues: [99744]
diff --git a/docs/changelog/99947.yaml b/docs/changelog/99947.yaml
deleted file mode 100644
index 61996c8fde92b..0000000000000
--- a/docs/changelog/99947.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99947
-summary: GET `_data_stream` displays both ILM and DSL information
-area: Data streams
-type: feature
-issues: []
diff --git a/docs/changelog/99956.yaml b/docs/changelog/99956.yaml
deleted file mode 100644
index 04646a98898a3..0000000000000
--- a/docs/changelog/99956.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99956
-summary: "ESQL: Serialize the source in expressions"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99995.yaml b/docs/changelog/99995.yaml
deleted file mode 100644
index d67cbdaec1f37..0000000000000
--- a/docs/changelog/99995.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99995
-summary: When a primary is inactive but this is considered expected, the same applies for the replica of this shard.
-area: Health
-type: enhancement
-issues:
- - 99951
diff --git a/docs/plugins/development/creating-stable-plugins.asciidoc b/docs/plugins/development/creating-stable-plugins.asciidoc
index a8efc86c5beac..c9a8a1f6c7e2a 100644
--- a/docs/plugins/development/creating-stable-plugins.asciidoc
+++ b/docs/plugins/development/creating-stable-plugins.asciidoc
@@ -59,7 +59,7 @@ for the plugin. If you need other resources, package them into a resources JAR.
[discrete]
==== Development process
-Elastic provides a Grade plugin, `elasticsearch.stable-esplugin`, that makes it
+Elastic provides a Gradle plugin, `elasticsearch.stable-esplugin`, that makes it
easier to develop and package stable plugins. The steps in this section assume
you use this plugin. However, you don't need Gradle to create plugins.
@@ -128,4 +128,4 @@ extend `ESClientYamlSuiteTestCase`.
[[plugin-descriptor-file-stable]]
==== The plugin descriptor file for stable plugins
-include::plugin-descriptor-file.asciidoc[]
\ No newline at end of file
+include::plugin-descriptor-file.asciidoc[]
diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc
index b5f1315531916..44a00b9f5b99e 100644
--- a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc
+++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc
@@ -68,7 +68,7 @@ POST /_search
--------------------------------------------------
// TEST[setup:sales]
-<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals
+<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-month intervals
<2> A `sum` metric is used to calculate the sum of a field. This could be any numeric metric (sum, min, max, etc)
<3> Finally, we specify a `moving_fn` aggregation which uses "the_sum" metric as its input.
diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc
index 7153e99e503a8..f9574ed933398 100644
--- a/docs/reference/cat/allocation.asciidoc
+++ b/docs/reference/cat/allocation.asciidoc
@@ -6,7 +6,7 @@
[IMPORTANT]
====
-cat APIs are only intended for human consumption using the command line or {kib}
+cat APIs are only intended for human consumption using the command line or {kib}
console. They are _not_ intended for use by applications.
====
@@ -113,10 +113,10 @@ The API returns the following response:
[source,txt]
--------------------------------------------------
-shards disk.indices disk.used disk.avail disk.total disk.percent host ip node
- 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2
+shards disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role
+ 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst
--------------------------------------------------
// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/]
-// TESTRESPONSE[s/CSUXak2/.+/ non_json]
+// TESTRESPONSE[s/CSUXak2 himrst/.+/ non_json]
This response shows a single shard is allocated to the one node available.
diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc
index de11bbcfc2d4e..a6c13e5aae708 100644
--- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc
+++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc
@@ -138,7 +138,7 @@ and that the next generation index will also be managed by {ilm-init}:
<2> For each backing index we display the value of the <>
configuration which will indicate if {ilm-init} takes precedence over data stream lifecycle in case
both systems are configured for an index.
-<3> The {ilm-ini} policy configured for this index.
+<3> The {ilm-init} policy configured for this index.
<4> The system that manages this index (possible values are "Index Lifecycle Management",
"Data stream lifecycle", or "Unmanaged")
<5> The system that will manage the next generation index (the new write index of this
@@ -196,7 +196,7 @@ precedence over Data stream lifecycle.
<2> We're configuring the data stream lifecycle so _new_ data streams will be
managed by Data stream lifecycle.
-We've now make sure that new data streams will be managed by Data stream lifecycle.
+We've now made sure that new data streams will be managed by Data stream lifecycle.
Let's update our existing `dsl-data-stream` and configure Data stream lifecycle:
@@ -359,7 +359,7 @@ the index template>>.
We can achieve this in two ways:
1. <> from the data streams
-2. Disable Data stream lifecycle by configured the `enabled` flag to `false`.
+2. Disable Data stream lifecycle by configuring the `enabled` flag to `false`.
Let's implement option 2 and disable the data stream lifecycle:
diff --git a/docs/reference/esql/esql-examples.asciidoc b/docs/reference/esql/esql-examples.asciidoc
index 569dcf1172b38..817ec4f7b6f24 100644
--- a/docs/reference/esql/esql-examples.asciidoc
+++ b/docs/reference/esql/esql-examples.asciidoc
@@ -13,11 +13,11 @@
----
FROM logs-*
| WHERE event.code IS NOT NULL
-| STATS event_code_count = count(event.code) by event.code,host.name
-| ENRICH win_events on event.code with event_description
+| STATS event_code_count = COUNT(event.code) BY event.code,host.name
+| ENRICH win_events ON event.code WITH event_description
| WHERE event_description IS NOT NULL and host.name IS NOT NULL
-| RENAME event_description as event.description
-| SORT event_code_count desc
+| RENAME event_description AS event.description
+| SORT event_code_count DESC
| KEEP event_code_count,event.code,host.name,event.description
----
@@ -40,7 +40,7 @@ FROM logs-endpoint
| WHERE process.name == "curl.exe"
| STATS bytes = SUM(destination.bytes) BY destination.address
| EVAL kb = bytes/1024
-| SORT kb desc
+| SORT kb DESC
| LIMIT 10
| KEEP kb,destination.address
----
@@ -60,7 +60,7 @@ FROM logs-endpoint
----
FROM logs-*
| GROK dns.question.name "%{DATA}\\.%{GREEDYDATA:dns.question.registered_domain:string}"
-| STATS unique_queries = count_distinct(dns.question.name) by dns.question.registered_domain, process.name
+| STATS unique_queries = COUNT_DISTINCT(dns.question.name) BY dns.question.registered_domain, process.name
| WHERE unique_queries > 10
| SORT unique_queries DESC
| RENAME unique_queries AS `Unique Queries`, dns.question.registered_domain AS `Registered Domain`, process.name AS `Process`
@@ -85,7 +85,7 @@ FROM logs-*
| ENRICH ldap_lookup_new ON user.name
| WHERE group.name IS NOT NULL
| EVAL follow_up = CASE(destcount >= 100, "true","false")
-| SORT destcount desc
+| SORT destcount DESC
| KEEP destcount, host.name, user.name, group.name, follow_up
----
diff --git a/docs/reference/esql/esql-get-started.asciidoc b/docs/reference/esql/esql-get-started.asciidoc
index 82831ef943398..e54825406257f 100644
--- a/docs/reference/esql/esql-get-started.asciidoc
+++ b/docs/reference/esql/esql-get-started.asciidoc
@@ -7,50 +7,14 @@
This guide shows how you can use {esql} to query and aggregate your data.
-TIP: To get started with {esql} without setting up your own deployment, visit
-the public {esql} demo environment at
-https://esql.demo.elastic.co/[esql.demo.elastic.co]. It comes with preloaded
-data sets and sample queries.
-
[discrete]
[[esql-getting-started-prerequisites]]
=== Prerequisites
-To follow along with the queries in this getting started guide, first ingest
-some sample data using the following requests:
-
-[source,console]
-----
-PUT sample_data
-{
- "mappings": {
- "properties": {
- "client.ip": {
- "type": "ip"
- },
- "message": {
- "type": "keyword"
- }
- }
- }
-}
-
-PUT sample_data/_bulk
-{"index": {}}
-{"@timestamp": "2023-10-23T12:15:03.360Z", "client.ip": "172.21.2.162", "message": "Connected to 10.1.0.3", "event.duration": 3450233}
-{"index": {}}
-{"@timestamp": "2023-10-23T12:27:28.948Z", "client.ip": "172.21.2.113", "message": "Connected to 10.1.0.2", "event.duration": 2764889}
-{"index": {}}
-{"@timestamp": "2023-10-23T13:33:34.937Z", "client.ip": "172.21.0.5", "message": "Disconnected", "event.duration": 1232382}
-{"index": {}}
-{"@timestamp": "2023-10-23T13:51:54.732Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 725448}
-{"index": {}}
-{"@timestamp": "2023-10-23T13:52:55.015Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 8268153}
-{"index": {}}
-{"@timestamp": "2023-10-23T13:53:55.832Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 5033755}
-{"index": {}}
-{"@timestamp": "2023-10-23T13:55:01.543Z", "client.ip": "172.21.3.15", "message": "Connected to 10.1.0.1", "event.duration": 1756467}
-----
+To follow along with the queries in this guide, you can either set up your own
+deployment, or use Elastic's public {esql} demo environment.
+
+include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc[]
[discrete]
[[esql-getting-started-running-queries]]
@@ -58,7 +22,7 @@ PUT sample_data/_bulk
In {kib}, you can use Console or Discover to run {esql} queries:
-include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget.asciidoc[]
+include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc[]
[discrete]
[[esql-getting-started-first-query]]
@@ -300,57 +264,9 @@ image::images/esql/esql-enrich.png[align="center"]
Before you can use `ENRICH`, you first need to
<> and <>
-an <>. The following requests create and
-execute a policy that links an IP address to an environment ("Development",
-"QA", or "Production"):
-
-[source,console]
-----
-PUT clientips
-{
- "mappings": {
- "properties": {
- "client.ip": {
- "type": "keyword"
- },
- "env": {
- "type": "keyword"
- }
- }
- }
-}
-
-PUT clientips/_bulk
-{ "index" : {}}
-{ "client.ip": "172.21.0.5", "env": "Development" }
-{ "index" : {}}
-{ "client.ip": "172.21.2.113", "env": "QA" }
-{ "index" : {}}
-{ "client.ip": "172.21.2.162", "env": "QA" }
-{ "index" : {}}
-{ "client.ip": "172.21.3.15", "env": "Production" }
-{ "index" : {}}
-{ "client.ip": "172.21.3.16", "env": "Production" }
-
-PUT /_enrich/policy/clientip_policy
-{
- "match": {
- "indices": "clientips",
- "match_field": "client.ip",
- "enrich_fields": ["env"]
- }
-}
-
-PUT /_enrich/policy/clientip_policy/_execute
-----
-
-////
-[source,console]
-----
-DELETE /_enrich/policy/clientip_policy
-----
-// TEST[continued]
-////
+an <>.
+
+include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc[]
After creating and executing a policy, you can use it with the `ENRICH`
command:
diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc
index 303f9a337b6c4..c7829ab9fba81 100644
--- a/docs/reference/esql/esql-limitations.asciidoc
+++ b/docs/reference/esql/esql-limitations.asciidoc
@@ -10,20 +10,8 @@
=== Result set size limit
By default, an {esql} query returns up to 500 rows. You can increase the number
-of rows up to 10,000 using the <> command. Queries do not return
-more than 10,000 rows, regardless of the `LIMIT` command's value.
-
-This limit only applies to the number of rows that are retrieved by the query
-and displayed in Discover. Queries and aggregations run on the full data set.
-
-To overcome this limitation:
-
-* Reduce the result set size by modifying the query to only return relevant
-data. Use <> to select a smaller subset of the data.
-* Shift any post-query processing to the query itself. You can use the {esql}
-<> command to aggregate data in the query.
-* Increase the limit with the `esql.query.result_truncation_max_size` static
-cluster setting.
+of rows up to 10,000 using the <> command.
+include::processing-commands/limit.asciidoc[tag=limitation]
[discrete]
[[esql-supported-types]]
@@ -69,6 +57,7 @@ cluster setting.
** `completion`
** `dense_vector`
** `double_range`
+** `flattened`
** `float_range`
** `histogram`
** `integer_range`
@@ -112,6 +101,12 @@ you query, and query `keyword` sub-fields instead of `text` fields.
{esql} does not support querying time series data streams (TSDS).
+[discrete]
+[[esql-limitations-ccs]]
+=== {ccs-cap} is not supported
+
+{esql} does not support {ccs}.
+
[discrete]
[[esql-limitations-date-math]]
=== Date math limitations
@@ -142,6 +137,33 @@ now() - 2023-10-26
include::esql-enrich-data.asciidoc[tag=limitations]
+[discrete]
+[[esql-limitations-dissect]]
+=== Dissect limitations
+
+include::esql-process-data-with-dissect-grok.asciidoc[tag=dissect-limitations]
+
+[discrete]
+[[esql-limitations-grok]]
+=== Grok limitations
+
+include::esql-process-data-with-dissect-grok.asciidoc[tag=grok-limitations]
+
+[discrete]
+[[esql-limitations-mv]]
+=== Multivalue limitations
+
+{esql} <>, but functions
+return `null` when applied to a multivalued field, unless documented otherwise.
+Work around this limitation by converting the field to single value with one of
+the <>.
+
+[discrete]
+[[esql-limitations-timezone]]
+=== Timezone support
+
+{esql} only supports the UTC timezone.
+
[discrete]
[[esql-limitations-kibana]]
=== Kibana limitations
diff --git a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc
index a37989b2b2da8..8f235ed0b7add 100644
--- a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc
+++ b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc
@@ -120,7 +120,6 @@ include::../ingest/processors/dissect.asciidoc[tag=dissect-key-modifiers]
| `+` | Append | left | `%{+keyname} %{+keyname}` | Appends two or more fields together | <>
| `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <>
| `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. Same behavior as `%{}`| <>
-| `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <>
|======
[[esql-dissect-modifier-skip-right-padding]]
@@ -139,9 +138,13 @@ include::../ingest/processors/dissect.asciidoc[tag=append-order-modifier]
====== Named skip key (`?`)
include::../ingest/processors/dissect.asciidoc[tag=named-skip-key]
-[[esql-reference-keys]]
-====== Reference keys (`*` and `&`)
-include::../ingest/processors/dissect.asciidoc[tag=reference-keys]
+[[esql-dissect-limitations]]
+===== Limitations
+
+// tag::dissect-limitations[]
+The `DISSECT` command does not support
+<>.
+// end::dissect-limitations[]
[[esql-process-data-with-grok]]
==== Process data with `GROK`
@@ -161,7 +164,14 @@ matches a log line of this format:
1.2.3.4 [2023-01-23T12:15:00.000Z] Connected
----
-and results in adding the following columns to the input table:
+Putting it together as an {esql} query:
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=grokWithEscape]
+----
+
+`GROK` adds the following columns to the input table:
[%header.monospaced.styled,format=dsv,separator=|]
|===
@@ -169,6 +179,25 @@ and results in adding the following columns to the input table:
2023-01-23T12:15:00.000Z | 1.2.3.4 | Connected
|===
+[NOTE]
+====
+
+Special regex characters in grok patterns, like `[` and `]` need to be escaped
+with a `\`. For example, in the earlier pattern:
+[source,txt]
+----
+%{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status}
+----
+
+In {esql} queries, the backslash character itself is a special character that
+needs to be escaped with another `\`. For this example, the corresponding {esql}
+query becomes:
+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=grokWithEscape]
+----
+====
+
[[esql-grok-patterns]]
===== Grok patterns
@@ -202,24 +231,6 @@ as well. Grok uses the Oniguruma regular expression library. Refer to
https://github.com/kkos/oniguruma/blob/master/doc/RE[the Oniguruma GitHub
repository] for the full supported regexp syntax.
-[NOTE]
-====
-Special regex characters like `[` and `]` need to be escaped with a `\`. For
-example, in the earlier pattern:
-[source,txt]
-----
-%{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status}
-----
-
-In {esql} queries, the backslash character itself is a special character that
-needs to be escaped with another `\`. For this example, the corresponding {esql}
-query becomes:
-[source.merge.styled,esql]
-----
-include::{esql-specs}/docs.csv-spec[tag=grokWithEscape]
-----
-====
-
[[esql-custom-patterns]]
===== Custom patterns
@@ -253,6 +264,8 @@ as the `GROK` command.
[[esql-grok-limitations]]
===== Limitations
+// tag::grok-limitations[]
The `GROK` command does not support configuring <>, or <>. The `GROK` command is not
subject to <>.
+// end::grok-limitations[]
\ No newline at end of file
diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc
index 437871d31a88f..afa9ab7254cfa 100644
--- a/docs/reference/esql/esql-query-api.asciidoc
+++ b/docs/reference/esql/esql-query-api.asciidoc
@@ -68,11 +68,6 @@ responses. See <>.
`query`::
(Required, object) {esql} query to run. For syntax, refer to <>.
-[[esql-search-api-time-zone]]
-`time_zone`::
-(Optional, string) ISO-8601 time zone ID for the search. Several {esql}
-date/time functions use this time zone. Defaults to `Z` (UTC).
-
[discrete]
[role="child_attributes"]
[[esql-query-api-response-body]]
diff --git a/docs/reference/esql/esql-security-solution.asciidoc b/docs/reference/esql/esql-security-solution.asciidoc
new file mode 100644
index 0000000000000..45e8e44e44bdd
--- /dev/null
+++ b/docs/reference/esql/esql-security-solution.asciidoc
@@ -0,0 +1,41 @@
+[[esql-elastic-security]]
+=== Using {esql} in {elastic-sec}
+
+++++
+Using {esql} in {elastic-sec}
+++++
+
+You can use {esql} in {elastic-sec} to investigate events in Timeline and create
+detection rules. Use the Elastic AI Assistant to build {esql} queries, or answer
+questions about the {esql} query language.
+
+[discrete]
+[[esql-elastic-security-timeline]]
+=== Use {esql} to investigate events in Timeline
+
+You can use {esql} in Timeline to filter, transform, and analyze event data
+stored in {es}. To start using {esql}, open the **{esql}** tab. To learn
+more, refer to {security-guide}/timelines-ui.html#esql-in-timeline[Investigate
+events in Timeline].
+
+[discrete]
+[[esql-elastic-security-detection-rules]]
+=== Use {esql} to create detection rules
+
+Use the {esql} rule type to create detection rules using {esql} queries. The
+{esql} rule type supports aggregating and non-aggregating queries. To learn
+more, refer to {security-guide}/rules-ui-create.html#create-esql-rule[Create an
+{esql} rule].
+
+[discrete]
+[[esql-elastic-security-ai-assistant]]
+=== Elastic AI Assistant
+
+Use the Elastic AI Assistant to build {esql} queries, or answer questions about
+the {esql} query language. To learn more, refer to
+{security-guide}/security-assistant.html[AI Assistant].
+
+NOTE: For AI Assistant to answer questions about {esql} and write {esql}
+queries, you need to
+{security-guide}/security-assistant.html#set-up-ai-assistant[enable knowledge
+base].
\ No newline at end of file
diff --git a/docs/reference/esql/esql-syntax.asciidoc b/docs/reference/esql/esql-syntax.asciidoc
index 725b1d3ff1e03..22c9b1f100827 100644
--- a/docs/reference/esql/esql-syntax.asciidoc
+++ b/docs/reference/esql/esql-syntax.asciidoc
@@ -9,7 +9,7 @@
[[esql-basic-syntax]]
=== Basic syntax
-An {esql} query is composed of a <> followed
+An {esql} query is composed of a <> followed
by an optional series of <>,
separated by a pipe character: `|`. For example:
@@ -36,6 +36,101 @@ source-command | processing-command1 | processing-command2
----
====
+[discrete]
+[[esql-identifiers]]
+==== Identifiers
+
+The identifiers can be used as they are and don't require quoting, unless
+containing special characters, in which case they must be quoted with
+backticks (+{backtick}+). What "special characters" means is command dependent.
+
+For <>, <>, <>,
+<>, <> and
+<> these are: `=`, +{backtick}+, `,`, ` ` (space), `|` ,
+`[`, `]`, `\t` (TAB), `\r` (CR), `\n` (LF); one `/` is allowed unquoted, but
+a sequence of two or more require quoting.
+
+The rest of the commands - those allowing for identifiers to be used in
+expressions - require quoting if the identifier contains characters other than
+letters, numbers and `_` and doesn't start with a letter, `_` or `@`.
+
+For instance:
+
+[source,esql]
+----
+// Retain just one field
+FROM index
+| KEEP 1.field
+----
+
+is legal. However, if the same field is to be used with an <>,
+it'd have to be quoted:
+
+[source,esql]
+----
+// Copy one field
+FROM index
+| EVAL my_field = `1.field`
+----
+
+[discrete]
+[[esql-literals]]
+==== Literals
+
+{esql} currently supports numeric and string literals.
+
+[discrete]
+[[esql-string-literals]]
+===== String literals
+
+A string literal is a sequence of unicode characters delimited by double
+quotes (`"`).
+
+[source,esql]
+----
+// Filter by a string value
+FROM index
+| WHERE first_name == "Georgi"
+----
+
+If the literal string itself contains quotes, these need to be escaped (`\\"`).
+{esql} also supports the triple-quotes (`"""`) delimiter, for convenience:
+
+[source,esql]
+----
+ROW name = """Indiana "Indy" Jones"""
+----
+
+The special characters CR, LF and TAB can be provided with the usual escaping:
+`\r`, `\n`, `\t`, respectively.
+
+[discrete]
+[[esql-numeric-literals]]
+===== Numerical literals
+
+The numeric literals are accepted in decimal and in the scientific notation
+with the exponent marker (`e` or `E`), starting either with a digit, decimal
+point `.` or the negative sign `-`:
+
+[source, sql]
+----
+1969 -- integer notation
+3.14 -- decimal notation
+.1234 -- decimal notation starting with decimal point
+4E5 -- scientific notation (with exponent marker)
+1.2e-3 -- scientific notation with decimal point
+-.1e2 -- scientific notation starting with the negative sign
+----
+
+The integer numeric literals are implicitly converted to the `integer`, `long`
+or the `double` type, whichever can first accommodate the literal's value.
+
+The floating point literals are implicitly converted to the `double` type.
+
+To obtain constant values of different types, use one of the numeric
+<>.
+
+
[discrete]
[[esql-comments]]
==== Comments
diff --git a/docs/reference/esql/esql-using.asciidoc b/docs/reference/esql/esql-using.asciidoc
index f586f3a28de5c..235c7defe559b 100644
--- a/docs/reference/esql/esql-using.asciidoc
+++ b/docs/reference/esql/esql-using.asciidoc
@@ -6,11 +6,16 @@ Information about using the <>.
<>::
Using {esql} in {kib} to query and aggregate your data, create visualizations,
-and set up alerts.
+and set up alerts.
+
+<>::
+Using {esql} in {elastic-sec} to investigate events in Timeline, create
+detection rules, and build {esql} queries using Elastic AI Assistant.
<>::
Using the <> to list and cancel {esql} queries.
include::esql-rest.asciidoc[]
include::esql-kibana.asciidoc[]
-include::task-management.asciidoc[]
\ No newline at end of file
+include::esql-security-solution.asciidoc[]
+include::task-management.asciidoc[]
diff --git a/docs/reference/esql/functions/case.asciidoc b/docs/reference/esql/functions/case.asciidoc
index b243adf875cb4..84ff083147cb9 100644
--- a/docs/reference/esql/functions/case.asciidoc
+++ b/docs/reference/esql/functions/case.asciidoc
@@ -4,7 +4,7 @@
*Syntax*
-[source,txt]
+[source,esql]
----
CASE(condition1, value1[, ..., conditionN, valueN][, default_value])
----
@@ -27,7 +27,8 @@ Accepts pairs of conditions and values. The function returns the value that
belongs to the first condition that evaluates to `true`.
If the number of arguments is odd, the last argument is the default value which
-is returned when no condition matches.
+is returned when no condition matches. If the number of arguments is even, and
+no condition matches, the function returns `null`.
*Example*
diff --git a/docs/reference/esql/functions/date_parse.asciidoc b/docs/reference/esql/functions/date_parse.asciidoc
index c74656ff1dbd7..9580ae238b663 100644
--- a/docs/reference/esql/functions/date_parse.asciidoc
+++ b/docs/reference/esql/functions/date_parse.asciidoc
@@ -4,7 +4,7 @@
*Syntax*
-[source,txt]
+[source,esql]
----
DATE_PARSE([format,] date_string)
----
diff --git a/docs/reference/esql/functions/date_trunc.asciidoc b/docs/reference/esql/functions/date_trunc.asciidoc
index cacfefe73d0fd..ad0e1eb1170b4 100644
--- a/docs/reference/esql/functions/date_trunc.asciidoc
+++ b/docs/reference/esql/functions/date_trunc.asciidoc
@@ -8,6 +8,6 @@ Rounds down a date to the closest interval. Intervals can be expressed using the
----
FROM employees
| EVAL year_hired = DATE_TRUNC(1 year, hire_date)
-| STATS count(emp_no) BY year_hired
+| STATS COUNT(emp_no) BY year_hired
| SORT year_hired
----
diff --git a/docs/reference/esql/functions/starts_with.asciidoc b/docs/reference/esql/functions/starts_with.asciidoc
index 38cee79ea63f8..f98a76ef68206 100644
--- a/docs/reference/esql/functions/starts_with.asciidoc
+++ b/docs/reference/esql/functions/starts_with.asciidoc
@@ -2,7 +2,7 @@
[[esql-starts_with]]
=== `STARTS_WITH`
[.text-center]
-image::esql/functions/signature/ends_with.svg[Embedded,opts=inline]
+image::esql/functions/signature/starts_with.svg[Embedded,opts=inline]
Returns a boolean that indicates whether a keyword string starts with another
string:
diff --git a/docs/reference/esql/index.asciidoc b/docs/reference/esql/index.asciidoc
index 2946f4e61d629..dcbe426b1bcac 100644
--- a/docs/reference/esql/index.asciidoc
+++ b/docs/reference/esql/index.asciidoc
@@ -55,14 +55,14 @@ fields>> and <>. And guidance for
GROK>> and <>.
<>::
-An overview of using the <>, <>, and
-<>.
+An overview of using the <>, <>,
+<>, and <>.
<>::
The current limitations of {esql}.
<>::
-A few examples of what you can with {esql}.
+A few examples of what you can do with {esql}.
include::esql-get-started.asciidoc[]
diff --git a/docs/reference/esql/processing-commands/dissect.asciidoc b/docs/reference/esql/processing-commands/dissect.asciidoc
index eca10c201c968..c48b72af0de7e 100644
--- a/docs/reference/esql/processing-commands/dissect.asciidoc
+++ b/docs/reference/esql/processing-commands/dissect.asciidoc
@@ -4,9 +4,9 @@
**Syntax**
-[source,txt]
+[source,esql]
----
-DISSECT input "pattern" [ append_separator=""]
+DISSECT input "pattern" [APPEND_SEPARATOR=""]
----
*Parameters*
@@ -16,9 +16,9 @@ The column that contains the string you want to structure. If the column has
multiple values, `DISSECT` will process each value.
`pattern`::
-A dissect pattern.
+A <>.
-`append_separator=""`::
+``::
A string used as the separator between appended values, when using the <>.
*Description*
@@ -29,7 +29,7 @@ delimiter-based pattern, and extracts the specified keys as columns.
Refer to <> for the syntax of dissect patterns.
-*Example*
+*Examples*
// tag::examples[]
The following example parses a string that contains a timestamp, some text, and
diff --git a/docs/reference/esql/processing-commands/drop.asciidoc b/docs/reference/esql/processing-commands/drop.asciidoc
index 50e3b27fb1b28..4787c5f137314 100644
--- a/docs/reference/esql/processing-commands/drop.asciidoc
+++ b/docs/reference/esql/processing-commands/drop.asciidoc
@@ -2,7 +2,23 @@
[[esql-drop]]
=== `DROP`
-Use `DROP` to remove columns:
+**Syntax**
+
+[source,esql]
+----
+DROP columns
+----
+
+*Parameters*
+
+`columns`::
+A comma-separated list of columns to remove. Supports wildcards.
+
+*Description*
+
+The `DROP` processing command removes one or more columns.
+
+*Examples*
[source,esql]
----
diff --git a/docs/reference/esql/processing-commands/enrich.asciidoc b/docs/reference/esql/processing-commands/enrich.asciidoc
index df402f3b1bd50..603683858b8c0 100644
--- a/docs/reference/esql/processing-commands/enrich.asciidoc
+++ b/docs/reference/esql/processing-commands/enrich.asciidoc
@@ -4,7 +4,7 @@
**Syntax**
-[source,txt]
+[source,esql]
----
ENRICH policy [ON match_field] [WITH [new_name1 = ]field1, [new_name2 = ]field2, ...]
----
@@ -15,18 +15,18 @@ ENRICH policy [ON match_field] [WITH [new_name1 = ]field1, [new_name2 = ]field2,
The name of the enrich policy. You need to <>
and <> the enrich policy first.
-`ON match_field`::
+`match_field`::
The match field. `ENRICH` uses its value to look for records in the enrich
index. If not specified, the match will be performed on the column with the same
name as the `match_field` defined in the <>.
-`WITH fieldX`::
+`fieldX`::
The enrich fields from the enrich index that are added to the result as new
columns. If a column with the same name as the enrich field already exists, the
existing column will be replaced by the new column. If not specified, each of
the enrich fields defined in the policy is added
-`new_nameX =`::
+`new_nameX`::
Enables you to change the name of the column that's added for each of the enrich
fields. Defaults to the enrich field name.
@@ -74,7 +74,7 @@ include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich_on-result]
By default, each of the enrich fields defined in the policy is added as a
column. To explicitly select the enrich fields that are added, use
-`WITH , ...`:
+`WITH , , ...`:
[source.merge.styled,esql]
----
diff --git a/docs/reference/esql/processing-commands/eval.asciidoc b/docs/reference/esql/processing-commands/eval.asciidoc
index a0a78f2a3bf97..eb69a587014ab 100644
--- a/docs/reference/esql/processing-commands/eval.asciidoc
+++ b/docs/reference/esql/processing-commands/eval.asciidoc
@@ -1,7 +1,30 @@
[discrete]
[[esql-eval]]
=== `EVAL`
-`EVAL` enables you to append new columns:
+
+**Syntax**
+
+[source,esql]
+----
+EVAL column1 = value1[, ..., columnN = valueN]
+----
+
+*Parameters*
+
+`columnX`::
+The column name.
+
+`valueX`::
+The value for the column. Can be a literal, an expression, or a
+<>.
+
+*Description*
+
+The `EVAL` processing command enables you to append new columns with calculated
+values. `EVAL` supports various functions for calculating values. Refer to
+<> for more information.
+
+*Examples*
[source.merge.styled,esql]
----
@@ -23,8 +46,3 @@ include::{esql-specs}/docs.csv-spec[tag=evalReplace]
|===
include::{esql-specs}/docs.csv-spec[tag=evalReplace-result]
|===
-
-[discrete]
-==== Functions
-`EVAL` supports various functions for calculating values. Refer to
-<> for more information.
diff --git a/docs/reference/esql/processing-commands/grok.asciidoc b/docs/reference/esql/processing-commands/grok.asciidoc
index c95fe59f888ce..d5d58a9eaee12 100644
--- a/docs/reference/esql/processing-commands/grok.asciidoc
+++ b/docs/reference/esql/processing-commands/grok.asciidoc
@@ -4,7 +4,7 @@
**Syntax**
-[source,txt]
+[source,esql]
----
GROK input "pattern"
----
diff --git a/docs/reference/esql/processing-commands/keep.asciidoc b/docs/reference/esql/processing-commands/keep.asciidoc
index 3e54e5a7d1c5c..7515583b1bfd1 100644
--- a/docs/reference/esql/processing-commands/keep.asciidoc
+++ b/docs/reference/esql/processing-commands/keep.asciidoc
@@ -2,11 +2,25 @@
[[esql-keep]]
=== `KEEP`
-The `KEEP` command enables you to specify what columns are returned and the
-order in which they are returned.
+**Syntax**
-To limit the columns that are returned, use a comma-separated list of column
-names. The columns are returned in the specified order:
+[source,esql]
+----
+KEEP columns
+----
+
+*Parameters*
+`columns`::
+A comma-separated list of columns to keep. Supports wildcards.
+
+*Description*
+
+The `KEEP` processing command enables you to specify what columns are returned
+and the order in which they are returned.
+
+*Examples*
+
+The columns are returned in the specified order:
[source.merge.styled,esql]
----
@@ -27,7 +41,7 @@ include::{esql-specs}/docs.csv-spec[tag=keepWildcard]
The asterisk wildcard (`*`) by itself translates to all columns that do not
match the other arguments. This query will first return all columns with a name
-that starts with an h, followed by all other columns:
+that starts with `h`, followed by all other columns:
[source,esql]
----
diff --git a/docs/reference/esql/processing-commands/limit.asciidoc b/docs/reference/esql/processing-commands/limit.asciidoc
index c02b534af59e1..5f659fc493a75 100644
--- a/docs/reference/esql/processing-commands/limit.asciidoc
+++ b/docs/reference/esql/processing-commands/limit.asciidoc
@@ -2,12 +2,46 @@
[[esql-limit]]
=== `LIMIT`
-The `LIMIT` processing command enables you to limit the number of rows:
+**Syntax**
[source,esql]
----
-include::{esql-specs}/docs.csv-spec[tag=limit]
+LIMIT max_number_of_rows
----
-If not specified, `LIMIT` defaults to `500`. A single query will not return
-more than 10,000 rows, regardless of the `LIMIT` value.
+*Parameters*
+
+`max_number_of_rows`::
+The maximum number of rows to return.
+
+*Description*
+
+The `LIMIT` processing command enables you to limit the number of rows that are
+returned.
+// tag::limitation[]
+Queries do not return more than 10,000 rows, regardless of the `LIMIT` command's
+value.
+
+This limit only applies to the number of rows that are retrieved by the query.
+Queries and aggregations run on the full data set.
+
+To overcome this limitation:
+
+* Reduce the result set size by modifying the query to only return relevant
+data. Use <> to select a smaller subset of the data.
+* Shift any post-query processing to the query itself. You can use the {esql}
+<> command to aggregate data in the query.
+
+The default and maximum limits can be changed using these dynamic cluster
+settings:
+
+* `esql.query.result_truncation_default_size`
+* `esql.query.result_truncation_max_size`
+// end::limitation[]
+
+*Example*
+
+[source,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=limit]
+----
diff --git a/docs/reference/esql/processing-commands/mv_expand.asciidoc b/docs/reference/esql/processing-commands/mv_expand.asciidoc
index d62b28aabe440..46dc4fd0a33cf 100644
--- a/docs/reference/esql/processing-commands/mv_expand.asciidoc
+++ b/docs/reference/esql/processing-commands/mv_expand.asciidoc
@@ -2,7 +2,24 @@
[[esql-mv_expand]]
=== `MV_EXPAND`
-The `MV_EXPAND` processing command expands multivalued fields into one row per value, duplicating other fields:
+**Syntax**
+
+[source,esql]
+----
+MV_EXPAND column
+----
+
+*Parameters*
+
+`column`::
+The multivalued column to expand.
+
+*Description*
+
+The `MV_EXPAND` processing command expands multivalued columns into one row per
+value, duplicating other columns.
+
+*Example*
[source.merge.styled,esql]
----
diff --git a/docs/reference/esql/processing-commands/rename.asciidoc b/docs/reference/esql/processing-commands/rename.asciidoc
index 1dda424317976..773fe8b640f75 100644
--- a/docs/reference/esql/processing-commands/rename.asciidoc
+++ b/docs/reference/esql/processing-commands/rename.asciidoc
@@ -2,22 +2,33 @@
[[esql-rename]]
=== `RENAME`
-Use `RENAME` to rename a column using the following syntax:
+**Syntax**
[source,esql]
----
-RENAME AS
+RENAME old_name1 AS new_name1[, ..., old_nameN AS new_nameN]
----
-For example:
+*Parameters*
+
+`old_nameX`::
+The name of a column you want to rename.
+
+`new_nameX`::
+The new name of the column.
+
+*Description*
+
+The `RENAME` processing command renames one or more columns. If a column with
+the new name already exists, it will be replaced by the new column.
+
+*Examples*
[source,esql]
----
include::{esql-specs}/docs.csv-spec[tag=rename]
----
-If a column with the new name already exists, it will be replaced by the new
-column.
Multiple columns can be renamed with a single `RENAME` command:
diff --git a/docs/reference/esql/processing-commands/sort.asciidoc b/docs/reference/esql/processing-commands/sort.asciidoc
index 76a9193375932..fea7bfaf0c65f 100644
--- a/docs/reference/esql/processing-commands/sort.asciidoc
+++ b/docs/reference/esql/processing-commands/sort.asciidoc
@@ -1,35 +1,59 @@
[discrete]
[[esql-sort]]
=== `SORT`
-Use the `SORT` command to sort rows on one or more fields:
+
+**Syntax**
+
+[source,esql]
+----
+SORT column1 [ASC/DESC][NULLS FIRST/NULLS LAST][, ..., columnN [ASC/DESC][NULLS FIRST/NULLS LAST]]
+----
+
+*Parameters*
+
+`columnX`::
+The column to sort on.
+
+*Description*
+
+The `SORT` processing command sorts a table on one or more columns.
+
+The default sort order is ascending. Use `ASC` or `DESC` to specify an explicit
+sort order.
+
+Two rows with the same sort key are considered equal. You can provide additional
+sort expressions to act as tie breakers.
+
+Sorting on multivalued columns uses the lowest value when sorting ascending and
+the highest value when sorting descending.
+
+By default, `null` values are treated as being larger than any other value. With
+an ascending sort order, `null` values are sorted last, and with a descending
+sort order, `null` values are sorted first. You can change that by providing
+`NULLS FIRST` or `NULLS LAST`.
+
+*Examples*
[source,esql]
----
include::{esql-specs}/docs.csv-spec[tag=sort]
----
-The default sort order is ascending. Set an explicit sort order using `ASC` or
-`DESC`:
+Explicitly sorting in ascending order with `ASC`:
[source,esql]
----
include::{esql-specs}/docs.csv-spec[tag=sortDesc]
----
-Two rows with the same sort key are considered equal. You can provide additional
-sort expressions to act as tie breakers:
+Providing additional sort expressions to act as tie breakers:
[source,esql]
----
include::{esql-specs}/docs.csv-spec[tag=sortTie]
----
-[discrete]
-==== `null` values
-By default, `null` values are treated as being larger than any other value. With
-an ascending sort order, `null` values are sorted last, and with a descending
-sort order, `null` values are sorted first. You can change that by providing
-`NULLS FIRST` or `NULLS LAST`:
+Sorting `null` values first using `NULLS FIRST`:
[source,esql]
----
diff --git a/docs/reference/esql/processing-commands/stats.asciidoc b/docs/reference/esql/processing-commands/stats.asciidoc
index e0a9bbb52b03e..cbdb74d350fb1 100644
--- a/docs/reference/esql/processing-commands/stats.asciidoc
+++ b/docs/reference/esql/processing-commands/stats.asciidoc
@@ -1,8 +1,49 @@
[discrete]
[[esql-stats-by]]
=== `STATS ... BY`
-Use `STATS ... BY` to group rows according to a common value and calculate one
-or more aggregated values over the grouped rows.
+
+**Syntax**
+
+[source,esql]
+----
+STATS [column1 =] expression1[, ..., [columnN =] expressionN] [BY grouping_column1[, ..., grouping_columnN]]
+----
+
+*Parameters*
+
+`columnX`::
+The name by which the aggregated value is returned. If omitted, the name is
+equal to the corresponding expression (`expressionX`).
+
+`expressionX`::
+An expression that computes an aggregated value.
+
+`grouping_columnX`::
+The column containing the values to group by.
+
+*Description*
+
+The `STATS ... BY` processing command groups rows according to a common value
+and calculate one or more aggregated values over the grouped rows. If `BY` is
+omitted, the output table contains exactly one row with the aggregations applied
+over the entire dataset.
+
+The following aggregation functions are supported:
+
+include::../functions/aggregation-functions.asciidoc[tag=agg_list]
+
+NOTE: `STATS` without any groups is much much faster than adding a group.
+
+NOTE: Grouping on a single column is currently much more optimized than grouping
+ on many columns. In some tests we have seen grouping on a single `keyword`
+ column to be five times faster than grouping on two `keyword` columns. Do
+ not try to work around this by combining the two columns together with
+ something like <> and then grouping - that is not going to be
+ faster.
+
+*Examples*
+
+Calculating a statistic and grouping by the values of another column:
[source.merge.styled,esql]
----
@@ -13,8 +54,8 @@ include::{esql-specs}/docs.csv-spec[tag=stats]
include::{esql-specs}/docs.csv-spec[tag=stats-result]
|===
-If `BY` is omitted, the output table contains exactly one row with the
-aggregations applied over the entire dataset:
+Omitting `BY` returns one row with the aggregations applied over the entire
+dataset:
[source.merge.styled,esql]
----
@@ -39,15 +80,3 @@ keyword family fields):
----
include::{esql-specs}/docs.csv-spec[tag=statsGroupByMultipleValues]
----
-
-The following aggregation functions are supported:
-
-include::../functions/aggregation-functions.asciidoc[tag=agg_list]
-
-NOTE: `STATS` without any groups is much much faster than adding group.
-
-NOTE: Grouping on a single field is currently much more optimized than grouping
- on many fields. In some tests we've seen grouping on a single `keyword`
- field to be five times faster than grouping on two `keyword` fields. Don't
- try to work around this combining the two fields together with something
- like <> and then grouping - that's not going to be faster.
diff --git a/docs/reference/esql/processing-commands/where.asciidoc b/docs/reference/esql/processing-commands/where.asciidoc
index 8dd55df12b9e7..e723a977bf99c 100644
--- a/docs/reference/esql/processing-commands/where.asciidoc
+++ b/docs/reference/esql/processing-commands/where.asciidoc
@@ -2,8 +2,27 @@
[[esql-where]]
=== `WHERE`
-Use `WHERE` to produce a table that contains all the rows from the input table
-for which the provided condition evaluates to `true`:
+**Syntax**
+
+[source,esql]
+----
+WHERE expression
+----
+
+*Parameters*
+
+`expression`::
+A boolean expression.
+
+*Description*
+
+The `WHERE` processing command produces a table that contains all the rows from
+the input table for which the provided condition evaluates to `true`.
+
+`WHERE` supports various <> and
+<>.
+
+*Examples*
[source,esql]
----
@@ -17,15 +36,7 @@ Which, if `still_hired` is a boolean field, can be simplified to:
include::{esql-specs}/docs.csv-spec[tag=whereBoolean]
----
-[discrete]
-==== Operators
-
-Refer to <> for an overview of the supported operators.
-
-[discrete]
-==== Functions
-`WHERE` supports various functions for calculating values. Refer to
-<> for more information.
+Using a function:
[source,esql]
----
diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc
index 5718bfc27ac1c..6f54a42ddad35 100644
--- a/docs/reference/esql/source-commands/from.asciidoc
+++ b/docs/reference/esql/source-commands/from.asciidoc
@@ -2,10 +2,47 @@
[[esql-from]]
=== `FROM`
-The `FROM` source command returns a table with up to 10,000 documents from a
-data stream, index, or alias. Each row in the resulting table represents a
-document. Each column corresponds to a field, and can be accessed by the name
-of that field.
+**Syntax**
+
+[source,esql]
+----
+FROM index_pattern [METADATA fields]
+----
+
+*Parameters*
+
+`index_pattern`::
+A list of indices, data streams or aliases. Supports wildcards and date math.
+
+`fields`::
+A comma-separated list of <> to retrieve.
+
+*Description*
+
+The `FROM` source command returns a table with data from a data stream, index,
+or alias. Each row in the resulting table represents a document. Each column
+corresponds to a field, and can be accessed by the name of that field.
+
+[NOTE]
+====
+By default, an {esql} query without an explicit <> uses an implicit
+limit of 500. This applies to `FROM` too. A `FROM` command without `LIMIT`:
+
+[source,esql]
+----
+FROM employees
+----
+
+is executed as:
+
+[source,esql]
+----
+FROM employees
+| LIMIT 500
+----
+====
+
+*Examples*
[source,esql]
----
diff --git a/docs/reference/esql/source-commands/row.asciidoc b/docs/reference/esql/source-commands/row.asciidoc
index edfe5ecbf7cf3..adce844f365b8 100644
--- a/docs/reference/esql/source-commands/row.asciidoc
+++ b/docs/reference/esql/source-commands/row.asciidoc
@@ -2,9 +2,29 @@
[[esql-row]]
=== `ROW`
+**Syntax**
+
+[source,esql]
+----
+ROW column1 = value1[, ..., columnN = valueN]
+----
+
+*Parameters*
+
+`columnX`::
+The column name.
+
+`valueX`::
+The value for the column. Can be a literal, an expression, or a
+<>.
+
+*Description*
+
The `ROW` source command produces a row with one or more columns with values
that you specify. This can be useful for testing.
+*Examples*
+
[source.merge.styled,esql]
----
include::{esql-specs}/row.csv-spec[tag=example]
diff --git a/docs/reference/esql/source-commands/show.asciidoc b/docs/reference/esql/source-commands/show.asciidoc
index 956baf628e9f3..ea8c83ceb772a 100644
--- a/docs/reference/esql/source-commands/show.asciidoc
+++ b/docs/reference/esql/source-commands/show.asciidoc
@@ -1,10 +1,35 @@
[discrete]
[[esql-show]]
-=== `SHOW `
+=== `SHOW`
-The `SHOW ` source command returns information about the deployment and
+**Syntax**
+
+[source,esql]
+----
+SHOW item
+----
+
+*Parameters*
+
+`item`::
+Can be `INFO` or `FUNCTIONS`.
+
+*Description*
+
+The `SHOW` source command returns information about the deployment and
its capabilities:
* Use `SHOW INFO` to return the deployment's version, build date and hash.
* Use `SHOW FUNCTIONS` to return a list of all supported functions and a
synopsis of each function.
+
+*Examples*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/show.csv-spec[tag=showFunctionsFiltered]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/show.csv-spec[tag=showFunctionsFiltered-result]
+|===
diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc
index 4f15bb1c1d694..31fe747feb63b 100644
--- a/docs/reference/index-modules.asciidoc
+++ b/docs/reference/index-modules.asciidoc
@@ -16,7 +16,10 @@ Index level settings can be set per-index. Settings may be:
_static_::
They can only be set at index creation time or on a
-<>.
+<>, or by using the
+<> API with the
+`reopen` query parameter set to `true` (which automatically
+closes and reopens impacted indices).
_dynamic_::
diff --git a/docs/reference/indices/resolve.asciidoc b/docs/reference/indices/resolve.asciidoc
index 1f405a2e49a7a..c919bba5c7651 100644
--- a/docs/reference/indices/resolve.asciidoc
+++ b/docs/reference/indices/resolve.asciidoc
@@ -88,9 +88,11 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]
+
Defaults to `true`.
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled]
+`ignore_throttled`::
+(Optional, Boolean) If `true`, concrete, expanded or aliased indices are
+ignored when frozen. Defaults to `false`.
+
-Defaults to `false`.
+deprecated:[7.16.0]
[[resolve-index-api-example]]
==== {api-examples-title}
diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc
index 45531dd58ccfc..1ac9ecbb6a6a3 100644
--- a/docs/reference/indices/update-settings.asciidoc
+++ b/docs/reference/indices/update-settings.asciidoc
@@ -60,6 +60,16 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailab
(Optional, Boolean) If `true`, existing index settings remain unchanged.
Defaults to `false`.
+`reopen`::
+(Optional, Boolean) If `true`, then any static settings that would ordinarily only
+be updated on closed indices will be updated by automatically closing and reopening
+the affected indices. If `false`, attempts to update static settings on open indices
+will fail. Defaults to `false`.
+
+NOTE: Changing index settings on an automatically closed index using the `reopen`
+parameter will result in the index becoming unavailable momentarily while the index
+is in the process of reopening.
+
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc
index f26a73d093091..f8515a8b33c39 100644
--- a/docs/reference/inference/post-inference.asciidoc
+++ b/docs/reference/inference/post-inference.asciidoc
@@ -25,9 +25,9 @@ Performs an inference task on an input text by using an {infer} model.
[[post-inference-api-desc]]
==== {api-description-title}
-The perform {infer} API enables you to use {infer} models to perform specific
-tasks on data that you provide as an input. The API returns a response with the
-resutls of the tasks. The {infer} model you use can perform one specific task
+The perform {infer} API enables you to use {infer} models to perform specific
+tasks on data that you provide as an input. The API returns a response with the
+results of the tasks. The {infer} model you use can perform one specific task
that has been defined when the model was created with the <>.
@@ -50,8 +50,9 @@ The type of {infer} task that the model performs.
== {api-request-body-title}
`input`::
-(Required, string)
+(Required, array of strings)
The text on which you want to perform the {infer} task.
+`input` can be a single string or an array.
[discrete]
@@ -77,23 +78,26 @@ The API returns the following response:
[source,console-result]
------------------------------------------------------------
{
- "sparse_embedding": {
- "port": 2.1259406,
- "sky": 1.7073475,
- "color": 1.6922266,
- "dead": 1.6247464,
- "television": 1.3525393,
- "above": 1.2425821,
- "tuned": 1.1440028,
- "colors": 1.1218185,
- "tv": 1.0111054,
- "ports": 1.0067928,
- "poem": 1.0042328,
- "channel": 0.99471164,
- "tune": 0.96235967,
- "scene": 0.9020516,
+ "sparse_embedding": [
+ {
+ "port": 2.1259406,
+ "sky": 1.7073475,
+ "color": 1.6922266,
+ "dead": 1.6247464,
+ "television": 1.3525393,
+ "above": 1.2425821,
+ "tuned": 1.1440028,
+ "colors": 1.1218185,
+ "tv": 1.0111054,
+ "ports": 1.0067928,
+ "poem": 1.0042328,
+ "channel": 0.99471164,
+ "tune": 0.96235967,
+ "scene": 0.9020516,
+ (...)
+ },
(...)
- }
+ ]
}
------------------------------------------------------------
-// NOTCONSOLE
\ No newline at end of file
+// NOTCONSOLE
diff --git a/docs/reference/ingest/search-inference-processing.asciidoc b/docs/reference/ingest/search-inference-processing.asciidoc
index fad11b28858b7..48505ab314c1e 100644
--- a/docs/reference/ingest/search-inference-processing.asciidoc
+++ b/docs/reference/ingest/search-inference-processing.asciidoc
@@ -54,7 +54,7 @@ A common use case is a user searching FAQs, or a support agent searching a knowl
The diagram below shows how documents are processed during ingestion.
// Original diagram: https://whimsical.com/ml-in-enterprise-search-ErCetPqrcCPu2QYHvAwrgP@2bsEvpTYSt1Hiuq6UBf68tUWvFiXdzLt6ao
-image::../images/ingest/document-enrichment-diagram.png["ML inference pipeline diagram"]
+image::images/ingest/document-enrichment-diagram.png["ML inference pipeline diagram"]
* Documents are processed by the `my-index-0001` pipeline, which happens automatically when indexing through a an Elastic connector or crawler.
* The `_run_ml_inference` field is set to `true` to ensure the ML inference pipeline (`my-index-0001@ml-inference`) is executed.
@@ -95,7 +95,7 @@ Once your index-specific ML inference pipeline is ready, you can add inference p
To add an inference processor to the ML inference pipeline, click the *Add Inference Pipeline* button in the *Machine Learning Inference Pipelines* card.
[role="screenshot"]
-image::../images/ingest/document-enrichment-add-inference-pipeline.png["Add Inference Pipeline"]
+image::images/ingest/document-enrichment-add-inference-pipeline.png["Add Inference Pipeline"]
Here, you'll be able to:
diff --git a/docs/reference/ingest/search-ingest-pipelines.asciidoc b/docs/reference/ingest/search-ingest-pipelines.asciidoc
index 049a74670581d..f37e07f632810 100644
--- a/docs/reference/ingest/search-ingest-pipelines.asciidoc
+++ b/docs/reference/ingest/search-ingest-pipelines.asciidoc
@@ -22,7 +22,7 @@ To find this tab in the Kibana UI:
The tab is highlighted in this screenshot:
[.screenshot]
-image::../images/ingest/ingest-pipeline-ent-search-ui.png[align="center"]
+image::images/ingest/ingest-pipeline-ent-search-ui.png[align="center"]
[discrete#ingest-pipeline-search-in-enterprise-search]
=== Overview
diff --git a/docs/reference/landing-page.asciidoc b/docs/reference/landing-page.asciidoc
index 1ddd0cfa28128..a53a5770fe030 100644
--- a/docs/reference/landing-page.asciidoc
+++ b/docs/reference/landing-page.asciidoc
@@ -105,6 +105,9 @@