diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 96f94755a2758..2c205f9090ba8 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -551,13 +551,19 @@ When running `./gradlew check`, minimal bwc checks are also run against compatib
==== BWC Testing against a specific remote/branch
-Sometimes a backward compatibility change spans two versions. A common case is a new functionality
-that needs a BWC bridge in an unreleased versioned of a release branch (for example, 5.x).
-To test the changes, you can instruct Gradle to build the BWC version from another remote/branch combination instead of
-pulling the release branch from GitHub. You do so using the `bwc.remote` and `bwc.refspec.BRANCH` system properties:
+Sometimes a backward compatibility change spans two versions.
+A common case is new functionality that needs a BWC bridge in an unreleased version of a release branch (for example, 5.x).
+Another use case, since the introduction of serverless, is to test BWC against main in addition to the other released branches.
+To do so, specify the remote and branch to use for the BWC build as `origin/main` via the `tests.bwc.refspec.{VERSION}` system property.
+To test against main, you will also need to create a new version in link:./server/src/main/java/org/elasticsearch/Version.java[Version.java],
+increment `elasticsearch` in link:./build-tools-internal/version.properties[version.properties], and hard-code the `project.version` for ml-cpp
+in link:./x-pack/plugin/ml/build.gradle[ml/build.gradle].
+
+In general, to test the changes, you can instruct Gradle to build the BWC version from another remote/branch combination instead of pulling the release branch from GitHub.
+You do so using the `tests.bwc.refspec.{VERSION}` system property:
-------------------------------------------------
-./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x
+./gradlew check -Dtests.bwc.refspec.8.15=origin/main
-------------------------------------------------
The branch needs to be available on the remote that the BWC makes of the
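For illustration, a minimal sketch of the Version.java step above, assuming the `major_minor_revision_99` id pattern used by the existing constants; the constant name and numbers here are hypothetical, use whatever the next unreleased version is:

-------------------------------------------------
// Version.java -- illustrative only; pick the next unreleased version
public static final Version V_8_16_0 = new Version(8_16_00_99);
-------------------------------------------------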
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java
index 9511a6bc01e08..70e9fe424e77b 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java
@@ -21,6 +21,7 @@
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.LowercaseNormalizer;
import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.mapper.MapperRegistry;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ProvidedIdFieldMapper;
@@ -71,7 +72,8 @@ public static MapperService create(String mappings) {
public <T> T compile(Script script, ScriptContext<T> scriptContext) {
throw new UnsupportedOperationException();
}
- }
+ },
+ MapperMetrics.NOOP
);
try {
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java
index b6cbc3e7cce02..14f6fe6501a73 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java
@@ -29,6 +29,7 @@
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
+import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.mapper.MapperRegistry;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
@@ -154,7 +155,8 @@ protected SearchExecutionContext buildSearchExecutionContext() {
null,
() -> true,
null,
- Collections.emptyMap()
+ Collections.emptyMap(),
+ MapperMetrics.NOOP
);
}
@@ -186,7 +188,8 @@ protected final MapperService createMapperService(String mappings) {
public <T> T compile(Script script, ScriptContext<T> scriptContext) {
throw new UnsupportedOperationException();
}
- }
+ },
+ MapperMetrics.NOOP
);
try {
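Both benchmark call sites above satisfy the new constructor parameter with a shared no-op metrics object. A generic sketch of that pattern; the `ParseMetrics` interface is hypothetical, only the `MapperMetrics.NOOP` constant appears in this diff:

-------------------------------------------------
// Hypothetical no-op metrics pattern: a shared NOOP instance keeps
// benchmark/test wiring compiling while recording nothing.
interface ParseMetrics {
    void recordParseTimeNanos(long nanos);

    ParseMetrics NOOP = nanos -> {}; // discards every measurement
}
-------------------------------------------------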
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java
index 24df3c4dab464..58b967d0a7722 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java
@@ -60,6 +60,7 @@
import static org.gradle.api.JavaVersion.VERSION_20;
import static org.gradle.api.JavaVersion.VERSION_21;
import static org.gradle.api.JavaVersion.VERSION_22;
+import static org.gradle.api.JavaVersion.VERSION_23;
@CacheableTask
public abstract class ThirdPartyAuditTask extends DefaultTask {
@@ -336,8 +337,8 @@ private String runForbiddenAPIsCli() throws IOException {
spec.setExecutable(javaHome.get() + "/bin/java");
}
spec.classpath(getForbiddenAPIsClasspath(), classpath);
- // Enable explicitly for each release as appropriate. Just JDK 20/21/22 for now, and just the vector module.
- if (isJavaVersion(VERSION_20) || isJavaVersion(VERSION_21) || isJavaVersion(VERSION_22)) {
+ // Enable explicitly for each release as appropriate. Just JDK 20/21/22/23 for now, and just the vector module.
+ if (isJavaVersion(VERSION_20) || isJavaVersion(VERSION_21) || isJavaVersion(VERSION_22) || isJavaVersion(VERSION_23)) {
spec.jvmArgs("--add-modules", "jdk.incubator.vector");
}
spec.jvmArgs("-Xmx1g");
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java
index 0270ee22ca8c5..89a40711c9a19 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java
@@ -11,7 +11,6 @@
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.commons.compress.utils.Lists;
import org.gradle.jvm.toolchain.JavaLanguageVersion;
import org.gradle.jvm.toolchain.JavaToolchainDownload;
import org.gradle.jvm.toolchain.JavaToolchainRequest;
@@ -21,17 +20,17 @@
import java.io.IOException;
import java.net.URI;
import java.net.URL;
-import java.util.Comparator;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.StreamSupport;
import static org.gradle.jvm.toolchain.JavaToolchainDownload.fromUri;
public abstract class AdoptiumJdkToolchainResolver extends AbstractCustomJavaToolchainResolver {
// package protected for better testing
- final Map<AdoptiumVersionRequest, Optional<AdoptiumVersionInfo>> CACHED_SEMVERS = new ConcurrentHashMap<>();
+ final Map<AdoptiumVersionRequest, Optional<String>> CACHED_RELEASES = new ConcurrentHashMap<>();
@Override
public Optional<JavaToolchainDownload> resolve(JavaToolchainRequest request) {
@@ -39,7 +38,7 @@ public Optional<JavaToolchainDownload> resolve(JavaToolchainRequest request) {
return Optional.empty();
}
AdoptiumVersionRequest versionRequestKey = toVersionRequest(request);
- Optional<AdoptiumVersionInfo> versionInfo = CACHED_SEMVERS.computeIfAbsent(
+ Optional<String> versionInfo = CACHED_RELEASES.computeIfAbsent(
versionRequestKey,
(r) -> resolveAvailableVersion(versionRequestKey)
);
@@ -54,12 +53,12 @@ private AdoptiumVersionRequest toVersionRequest(JavaToolchainRequest request) {
return new AdoptiumVersionRequest(platform, arch, javaLanguageVersion);
}
- private Optional<AdoptiumVersionInfo> resolveAvailableVersion(AdoptiumVersionRequest requestKey) {
+ private Optional<String> resolveAvailableVersion(AdoptiumVersionRequest requestKey) {
ObjectMapper mapper = new ObjectMapper();
try {
int languageVersion = requestKey.languageVersion.asInt();
URL source = new URL(
- "https://api.adoptium.net/v3/info/release_versions?architecture="
+ "https://api.adoptium.net/v3/info/release_names?architecture="
+ requestKey.arch
+ "&image_type=jdk&os="
+ requestKey.platform
@@ -71,14 +70,8 @@ private Optional<AdoptiumVersionInfo> resolveAvailableVersion(AdoptiumVersionReq
+ ")"
);
JsonNode jsonNode = mapper.readTree(source);
- JsonNode versionsNode = jsonNode.get("versions");
- return Optional.of(
- Lists.newArrayList(versionsNode.iterator())
- .stream()
- .map(this::toVersionInfo)
- .max(Comparator.comparing(AdoptiumVersionInfo::semver))
- .get()
- );
+ JsonNode versionsNode = jsonNode.get("releases");
+ return StreamSupport.stream(versionsNode.spliterator(), false).map(JsonNode::textValue).findFirst();
} catch (FileNotFoundException e) {
// request combo not supported (e.g. aarch64 + windows
return Optional.empty();
@@ -87,21 +80,10 @@ private Optional<AdoptiumVersionInfo> resolveAvailableVersion(AdoptiumVersionReq
}
}
- private AdoptiumVersionInfo toVersionInfo(JsonNode node) {
- return new AdoptiumVersionInfo(
- node.get("build").asInt(),
- node.get("major").asInt(),
- node.get("minor").asInt(),
- node.get("openjdk_version").asText(),
- node.get("security").asInt(),
- node.get("semver").asText()
- );
- }
-
- private URI resolveDownloadURI(AdoptiumVersionRequest request, AdoptiumVersionInfo versionInfo) {
+ private URI resolveDownloadURI(AdoptiumVersionRequest request, String version) {
return URI.create(
- "https://api.adoptium.net/v3/binary/version/jdk-"
- + versionInfo.semver
+ "https://api.adoptium.net/v3/binary/version/"
+ + version
+ "/"
+ request.platform
+ "/"
@@ -118,7 +100,5 @@ private boolean requestIsSupported(JavaToolchainRequest request) {
return anyVendorOr(request.getJavaToolchainSpec().getVendor().get(), JvmVendorSpec.ADOPTIUM);
}
- record AdoptiumVersionInfo(int build, int major, int minor, String openjdkVersion, int security, String semver) {}
-
record AdoptiumVersionRequest(String platform, String arch, JavaLanguageVersion languageVersion) {}
}
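The resolver now asks Adoptium's `/v3/info/release_names` endpoint for ready-made release names and takes the first one, instead of rebuilding a semver from the old `release_versions` payload. A minimal, self-contained sketch of the new parsing, assuming a response shaped like the literal below (the release names are invented; the query is presumably sorted so the newest release comes first):

-------------------------------------------------
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.Optional;
import java.util.stream.StreamSupport;

public class ReleaseNameParsingSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical response body from /v3/info/release_names
        String payload = "{\"releases\":[\"jdk-21.0.3+9\",\"jdk-21.0.2+13\"]}";
        JsonNode releases = new ObjectMapper().readTree(payload).get("releases");
        // Same stream pipeline as the diff: the first entry wins
        Optional<String> latest = StreamSupport.stream(releases.spliterator(), false)
            .map(JsonNode::textValue)
            .findFirst();
        System.out.println(latest.orElse("none")); // jdk-21.0.3+9
    }
}
-------------------------------------------------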
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java
index 818cb040c172e..162895fd486cf 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java
@@ -39,11 +39,7 @@ record JdkBuild(JavaLanguageVersion languageVersion, String version, String buil
);
// package private so it can be replaced by tests
- List<JdkBuild> builds = List.of(
- getBundledJdkBuild(),
- // 22 release candidate
- new JdkBuild(JavaLanguageVersion.of(22), "22", "36", "830ec9fcccef480bb3e73fb7ecafe059")
- );
+ List<JdkBuild> builds = List.of(getBundledJdkBuild());
private JdkBuild getBundledJdkBuild() {
String bundledJdkVersion = VersionProperties.getBundledJdkVersion();
diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy
index 6383d577f027f..fe4a644ddfc1d 100644
--- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy
+++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy
@@ -11,7 +11,6 @@ package org.elasticsearch.gradle.internal.toolchain
import org.gradle.api.services.BuildServiceParameters
import org.gradle.jvm.toolchain.JavaLanguageVersion
import org.gradle.jvm.toolchain.JavaToolchainResolver
-import org.gradle.platform.OperatingSystem
import static org.elasticsearch.gradle.internal.toolchain.AbstractCustomJavaToolchainResolver.toArchString
import static org.elasticsearch.gradle.internal.toolchain.AbstractCustomJavaToolchainResolver.toOsString
@@ -38,12 +37,7 @@ class AdoptiumJdkToolchainResolverSpec extends AbstractToolchainResolverSpec {
toOsString(it[2], it[1]),
toArchString(it[3]),
languageVersion);
- resolver.CACHED_SEMVERS.put(request, Optional.of(new AdoptiumJdkToolchainResolver.AdoptiumVersionInfo(languageVersion.asInt(),
- 1,
- 1,
- "" + languageVersion.asInt() + ".1.1.1+37",
- 0, "" + languageVersion.asInt() + ".1.1.1+37.1"
- )))
+ resolver.CACHED_RELEASES.put(request, Optional.of('jdk-' + languageVersion.asInt() + '.1.1.1+37.1'))
}
return resolver
diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties
index 044f6c07c756e..a2744e89174b2 100644
--- a/build-tools-internal/version.properties
+++ b/build-tools-internal/version.properties
@@ -2,7 +2,7 @@ elasticsearch = 8.15.0
lucene = 9.10.0
bundled_jdk_vendor = openjdk
-bundled_jdk = 21.0.2+13@f2283984656d49d69e91c558476027ac
+bundled_jdk = 22.0.1+8@c7ec1332f7bb44aeba2eb341ae18aca4
# optional dependencies
spatial4j = 0.7
jts = 1.15.0
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
index 31e1cb882305a..999f27a646b1f 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
@@ -1107,11 +1107,11 @@ private void logFileContents(String description, Path from, boolean tailLogs) {
return;
}
- boolean foundNettyLeaks = false;
+ boolean foundLeaks = false;
for (String logLine : errorsAndWarnings.keySet()) {
- if (logLine.contains("ResourceLeakDetector]")) {
+ if (logLine.contains("ResourceLeakDetector") || logLine.contains("LeakTracker")) {
tailLogs = true;
- foundNettyLeaks = true;
+ foundLeaks = true;
break;
}
}
@@ -1140,8 +1140,8 @@ private void logFileContents(String description, Path from, boolean tailLogs) {
});
}
}
- if (foundNettyLeaks) {
- throw new TestClustersException("Found Netty ByteBuf leaks in node logs.");
+ if (foundLeaks) {
+ throw new TestClustersException("Found resource leaks in node logs.");
}
}
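The log scan above now trips on either marker. A minimal, self-contained sketch of the broadened check; the class name and log lines are invented for illustration:

-------------------------------------------------
import java.util.List;

public class LeakScanSketch {
    // True when any warning line mentions Netty's ResourceLeakDetector or
    // Elasticsearch's own LeakTracker, the two markers the check looks for.
    static boolean foundLeaks(List<String> errorsAndWarnings) {
        for (String logLine : errorsAndWarnings) {
            if (logLine.contains("ResourceLeakDetector") || logLine.contains("LeakTracker")) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(foundLeaks(List.of("[WARN ][o.e.t.LeakTracker] leak detected")));  // true
        System.out.println(foundLeaks(List.of("[INFO ][o.e.n.Node] started")));               // false
    }
}
-------------------------------------------------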
diff --git a/client/test/build.gradle b/client/test/build.gradle
index d9a10a9c6ffdc..8d457948b91b4 100644
--- a/client/test/build.gradle
+++ b/client/test/build.gradle
@@ -27,9 +27,9 @@ dependencies {
api "org.hamcrest:hamcrest:${versions.hamcrest}"
// mockito
- api 'org.mockito:mockito-core:5.9.0'
- api 'org.mockito:mockito-subclass:5.9.0'
- api 'net.bytebuddy:byte-buddy:1.14.11'
+ api 'org.mockito:mockito-core:5.11.0'
+ api 'org.mockito:mockito-subclass:5.11.0'
+ api 'net.bytebuddy:byte-buddy:1.14.12'
api 'org.objenesis:objenesis:3.3'
}
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
index 8cfe9a1f03914..f853304bcdf90 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
@@ -27,62 +27,64 @@ static List<String> systemJvmOptions(Settings nodeSettings, final Map<String, String> sysprops) {
         ).filter(e -> e.isEmpty() == false).collect(Collectors.toList());
}
@@ -141,12 +143,12 @@ private static String maybeEnableNativeAccess() {
/*
* Only affects 22 and 22.0.1, see https://bugs.openjdk.org/browse/JDK-8329528
*/
- private static String maybeWorkaroundG1Bug() {
+ private static Stream<String> maybeWorkaroundG1Bug() {
Runtime.Version v = Runtime.version();
if (v.feature() == 22 && v.update() <= 1) {
- return "-XX:+UnlockDiagnosticVMOptions -XX:G1NumCollectionsKeepPinned=10000000";
+ return Stream.of("-XX:+UnlockDiagnosticVMOptions", "-XX:G1NumCollectionsKeepPinned=10000000");
}
- return "";
+ return Stream.of();
}
private static String findLibraryPath(Map<String, String> sysprops) {
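The return-type change matters because each JVM flag must stay a separate argument: the old version returned both G1 flags in one space-separated String, which downstream handling could treat as a single malformed option. A sketch of how the Stream<String> variant splices into an option list; `maybeWorkaroundG1Bug` mirrors the diff, while the composition in `main` is an assumption:

-------------------------------------------------
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class JvmOptionsSketch {
    static Stream<String> maybeWorkaroundG1Bug() {
        Runtime.Version v = Runtime.version();
        // Only affects JDK 22 and 22.0.1, see JDK-8329528
        if (v.feature() == 22 && v.update() <= 1) {
            return Stream.of("-XX:+UnlockDiagnosticVMOptions", "-XX:G1NumCollectionsKeepPinned=10000000");
        }
        return Stream.of();
    }

    public static void main(String[] args) {
        // Hypothetical composition: base options plus the conditional workaround,
        // each flag remaining a separate list element.
        List<String> options = Stream.concat(Stream.of("-Xmx1g"), maybeWorkaroundG1Bug())
            .filter(e -> e.isEmpty() == false)
            .collect(Collectors.toList());
        System.out.println(options);
    }
}
-------------------------------------------------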
diff --git a/docs/changelog/106636.yaml b/docs/changelog/106636.yaml
deleted file mode 100644
index e110d98ca577d..0000000000000
--- a/docs/changelog/106636.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 106636
-summary: "ESQL: Add OPTIONS clause to FROM command"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/108409.yaml b/docs/changelog/108409.yaml
new file mode 100644
index 0000000000000..6cff86cf93930
--- /dev/null
+++ b/docs/changelog/108409.yaml
@@ -0,0 +1,6 @@
+pr: 108409
+summary: Support multiple associated groups for TopN
+area: Application
+type: enhancement
+issues:
+ - 108018
diff --git a/docs/changelog/108574.yaml b/docs/changelog/108574.yaml
new file mode 100644
index 0000000000000..b3c957721e01e
--- /dev/null
+++ b/docs/changelog/108574.yaml
@@ -0,0 +1,5 @@
+pr: 108574
+summary: "[ESQL] CBRT function"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/108600.yaml b/docs/changelog/108600.yaml
new file mode 100644
index 0000000000000..59177bf34114c
--- /dev/null
+++ b/docs/changelog/108600.yaml
@@ -0,0 +1,15 @@
+pr: 108600
+summary: "Prevent DLS/FLS if `replication` is assigned"
+area: Security
+type: breaking
+issues: [ ]
+breaking:
+ title: "Prevent DLS/FLS if `replication` is assigned"
+ area: REST API
+ details: For cross-cluster API keys, {es} no longer allows specifying document-level security (DLS)
+ or field-level security (FLS) in the `search` field, if `replication` is also specified.
+ {es} likewise blocks the use of any existing cross-cluster API keys that meet this condition.
+ impact: Remove any document-level security (DLS) or field-level security (FLS) definitions from the `search` field
+ for cross-cluster API keys that also have a `replication` field, or create two separate cross-cluster API keys,
+ one for search and one for replication.
+ notable: false
diff --git a/docs/changelog/108602.yaml b/docs/changelog/108602.yaml
new file mode 100644
index 0000000000000..d544c89980123
--- /dev/null
+++ b/docs/changelog/108602.yaml
@@ -0,0 +1,5 @@
+pr: 108602
+summary: "[Inference API] Extract optional long instead of integer in `RateLimitSettings#of`"
+area: Machine Learning
+type: bug
+issues: []
diff --git a/docs/changelog/108639.yaml b/docs/changelog/108639.yaml
new file mode 100644
index 0000000000000..586270c3c761c
--- /dev/null
+++ b/docs/changelog/108639.yaml
@@ -0,0 +1,28 @@
+pr: 108639
+summary: Add support for the 'Domain' database to the geoip processor
+area: Ingest Node
+type: enhancement
+issues: []
+highlight:
+ title: Add support for the 'Domain' database to the geoip processor
+ body: |-
+ Follow on to #107287 and #107377
+
+ Adds support for the ['GeoIP2
+ Domain'](https://dev.maxmind.com/geoip/docs/databases/domain) database
+ from MaxMind to the `geoip` processor.
+
+ The `geoip` processor will automatically download the [various
+ 'GeoLite2'
+ databases](https://dev.maxmind.com/geoip/geolite2-free-geolocation-data),
+ but the 'GeoIP2 Domain' database is not a 'GeoLite2' database -- it's a
+ commercial database available to those with a suitable license from
+ MaxMind.
+
+ The support that is being added for it in this PR is in line with the
+ support that we already have for MaxMind's 'GeoIP2 City' and 'GeoIP2
+ Country' databases -- that is, one would need to arrange their own
+ download management via some custom endpoint or otherwise arrange for
+ the relevant file(s) to be in the `$ES_CONFIG/ingest-geoip` directory on
+ the nodes of the cluster.
+ notable: true
diff --git a/docs/changelog/108643.yaml b/docs/changelog/108643.yaml
new file mode 100644
index 0000000000000..f71a943673326
--- /dev/null
+++ b/docs/changelog/108643.yaml
@@ -0,0 +1,6 @@
+pr: 108643
+summary: Use `scheduleUnlessShuttingDown` in `LeaderChecker`
+area: Cluster Coordination
+type: bug
+issues:
+ - 108642
diff --git a/docs/changelog/108651.yaml b/docs/changelog/108651.yaml
new file mode 100644
index 0000000000000..e629c114dac51
--- /dev/null
+++ b/docs/changelog/108651.yaml
@@ -0,0 +1,29 @@
+pr: 108651
+summary: Add support for the 'ISP' database to the geoip processor
+area: Ingest Node
+type: enhancement
+issues: []
+highlight:
+ title: Add support for the 'ISP' database to the geoip processor
+ body: |-
+ Follow on to https://github.com/elastic/elasticsearch/pull/107287,
+ https://github.com/elastic/elasticsearch/pull/107377, and
+ https://github.com/elastic/elasticsearch/pull/108639
+
+ Adds support for the ['GeoIP2
+ ISP'](https://dev.maxmind.com/geoip/docs/databases/isp) database from
+ MaxMind to the geoip processor.
+
+ The geoip processor will automatically download the [various 'GeoLite2'
+ databases](https://dev.maxmind.com/geoip/geolite2-free-geolocation-data),
+ but the 'GeoIP2 ISP' database is not a 'GeoLite2' database -- it's a
+ commercial database available to those with a suitable license from
+ MaxMind.
+
+ The support that is being added for it in this PR is in line with the
+ support that we already have for MaxMind's 'GeoIP2 City' and 'GeoIP2
+ Country' databases -- that is, one would need to arrange their own
+ download management via some custom endpoint or otherwise arrange for
+ the relevant file(s) to be in the $ES_CONFIG/ingest-geoip directory on
+ the nodes of the cluster.
+ notable: true
diff --git a/docs/changelog/108654.yaml b/docs/changelog/108654.yaml
new file mode 100644
index 0000000000000..9afae6a19ca80
--- /dev/null
+++ b/docs/changelog/108654.yaml
@@ -0,0 +1,5 @@
+pr: 108654
+summary: Update bundled JDK to Java 22 (again)
+area: Packaging
+type: upgrade
+issues: []
diff --git a/docs/changelog/108672.yaml b/docs/changelog/108672.yaml
new file mode 100644
index 0000000000000..e1261fcf6f232
--- /dev/null
+++ b/docs/changelog/108672.yaml
@@ -0,0 +1,5 @@
+pr: 108672
+summary: Add bounds checking to parsing ISO8601 timezone offset values
+area: Infra/Core
+type: bug
+issues: []
diff --git a/docs/changelog/108683.yaml b/docs/changelog/108683.yaml
new file mode 100644
index 0000000000000..ad796fb9b25c7
--- /dev/null
+++ b/docs/changelog/108683.yaml
@@ -0,0 +1,28 @@
+pr: 108683
+summary: Add support for the 'Connection Type' database to the geoip processor
+area: Ingest Node
+type: enhancement
+issues: []
+highlight:
+ title: Add support for the 'Connection Type' database to the geoip processor
+ body: |-
+ Follow on to #107287, #107377, #108639, and #108651
+
+ Adds support for the ['GeoIP2 Connection
+ Type'](https://dev.maxmind.com/geoip/docs/databases/connection-type)
+ database from MaxMind to the `geoip` processor.
+
+ The `geoip` processor will automatically download the [various
+ 'GeoLite2'
+ databases](https://dev.maxmind.com/geoip/geolite2-free-geolocation-data),
+ but the 'GeoIP2 Connection Type' database is not a 'GeoLite2' database
+ -- it's a commercial database available to those with a suitable license
+ from MaxMind.
+
+ The support that is being added for it in this PR is in line with the
+ support that we already have for MaxMind's 'GeoIP2 City' and 'GeoIP2
+ Country' databases -- that is, one would need to arrange their own
+ download management via some custom endpoint or otherwise arrange for
+ the relevant file(s) to be in the `$ES_CONFIG/ingest-geoip` directory on
+ the nodes of the cluster.
+ notable: true
diff --git a/docs/changelog/108687.yaml b/docs/changelog/108687.yaml
new file mode 100644
index 0000000000000..771516d551567
--- /dev/null
+++ b/docs/changelog/108687.yaml
@@ -0,0 +1,5 @@
+pr: 108687
+summary: Add `user_type` support for the enterprise database to the geoip processor
+area: Ingest Node
+type: enhancement
+issues: []
diff --git a/docs/reference/cluster/get-settings.asciidoc b/docs/reference/cluster/get-settings.asciidoc
index 5a9fe81df61c7..32c186e4ef24c 100644
--- a/docs/reference/cluster/get-settings.asciidoc
+++ b/docs/reference/cluster/get-settings.asciidoc
@@ -40,4 +40,4 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=flat-settings]
(Optional, Boolean) If `true`, returns default cluster settings from the local node.
Defaults to `false`.
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc
index 8ff7da3a16ad1..6f1d769e696c5 100644
--- a/docs/reference/cluster/nodes-info.asciidoc
+++ b/docs/reference/cluster/nodes-info.asciidoc
@@ -184,7 +184,7 @@ running process:
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=flat-settings]
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeout-nodes-request]
[[cluster-nodes-info-api-example]]
diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc
index d0e4188ce74ed..bccef4bb613b3 100644
--- a/docs/reference/cluster/nodes-stats.asciidoc
+++ b/docs/reference/cluster/nodes-stats.asciidoc
@@ -147,7 +147,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=level]
(Optional, string) A comma-separated list of document types for the
`indexing` index metric.
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeout-nodes-request]
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=include-segment-file-sizes]
diff --git a/docs/reference/cluster/nodes-usage.asciidoc b/docs/reference/cluster/nodes-usage.asciidoc
index 6c53919bcfbbc..486edf67bba87 100644
--- a/docs/reference/cluster/nodes-usage.asciidoc
+++ b/docs/reference/cluster/nodes-usage.asciidoc
@@ -54,7 +54,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=node-id]
[[cluster-nodes-usage-api-query-params]]
==== {api-query-parms-title}
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeout-nodes-request]
[[cluster-nodes-usage-api-example]]
diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc
index 0ffd700957506..4b32d5f1b903a 100644
--- a/docs/reference/cluster/tasks.asciidoc
+++ b/docs/reference/cluster/tasks.asciidoc
@@ -48,7 +48,11 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=nodes]
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=parent-task-id]
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+`timeout`::
+(Optional, <<time-units, time units>>)
+Period to wait for each node to respond. If a node does not respond before its
+timeout expires, the response does not include its information. However, timed out
+nodes are included in the response's `node_failures` property. Defaults to `30s`.
`wait_for_completion`::
(Optional, Boolean) If `true`, the request blocks until all found tasks are complete.
diff --git a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc
index f864b68f65395..7ae80276d3151 100644
--- a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc
+++ b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc
@@ -6,7 +6,7 @@
beta::[]
-Updates the draft `filtering` configuration of a connector and marks the draft validation state as `edited`. The filtering configuration can be activated once validated by the Elastic connector service.
+Updates the draft `filtering` configuration of a connector and marks the draft validation state as `edited`. The filtering draft is activated once validated by the running Elastic connector service.
The filtering property is used to configure sync rules (both basic and advanced) for a connector. Learn more in the {enterprise-search-ref}/sync-rules.html[sync rules documentation].
@@ -15,14 +15,13 @@ The filtering property is used to configure sync rules (both basic and advanced)
`PUT _connector/<connector_id>/_filtering`
-`PUT _connector/<connector_id>/_filtering/_activate`
-
[[update-connector-filtering-api-prereq]]
==== {api-prereq-title}
* To sync data using self-managed connectors, you need to deploy the {enterprise-search-ref}/build-connector.html[Elastic connector service] on your own infrastructure. This service runs automatically on Elastic Cloud for native connectors.
* The `connector_id` parameter should reference an existing connector.
-* To activate filtering rules, the `draft.validation.state` must be `valid`.
+* The filtering draft is activated once validated by the running Elastic connector service; for activation, the `draft.validation.state` must be `valid`.
+* If, after a validation attempt, the `draft.validation.state` is `invalid`, inspect `draft.validation.errors` and fix any issues.
[[update-connector-filtering-api-path-params]]
==== {api-path-parms-title}
@@ -185,20 +184,4 @@ PUT _connector/my-sql-connector/_filtering/_validation
Note, you can also update draft `rules` and `advanced_snippet` in a single request.
-Once the draft is updated, its validation state is set to `edited`. The connector service will then validate the rules and report the validation state as either `invalid` or `valid`. If the state is `valid`, the draft filtering can be activated with:
-
-
-[source,console]
-----
-PUT _connector/my-sql-connector/_filtering/_activate
-----
-// TEST[continued]
-
-[source,console-result]
-----
-{
- "result": "updated"
-}
-----
-
-Once filtering rules are activated, they will be applied to all subsequent full or incremental syncs.
+Once the draft is updated, its validation state is set to `edited`. The connector service will then validate the rules and report the validation state as either `invalid` or `valid`. If the state is `valid`, the draft filtering will be activated by the running Elastic connector service.
diff --git a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc
index b739751ca5b02..7968bb78939e8 100644
--- a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc
+++ b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc
@@ -38,7 +38,7 @@ execution.
(Optional, Boolean) Includes default configurations related to the lifecycle of the target.
Defaults to `false`.
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
[[data-streams-explain-lifecycle-example]]
==== {api-examples-title}
diff --git a/docs/reference/esql/esql-index-options.asciidoc b/docs/reference/esql/esql-index-options.asciidoc
deleted file mode 100644
index 721461bd96719..0000000000000
--- a/docs/reference/esql/esql-index-options.asciidoc
+++ /dev/null
@@ -1,52 +0,0 @@
-[[esql-index-options]]
-=== {esql} index options
-
-++++
-Index options
-++++
-
-The `OPTIONS` directive of the <<esql-from,FROM>> command allows configuring
-the way {esql} accesses the data to be queried. The argument passed to this
-directive is a comma-separated list of option name-value pairs, with the option
-name and the corresponding value double-quoted.
-
-[source,esql]
-----
-FROM index_pattern [OPTIONS "option1"="value1"[,...[,"optionN"="valueN"]]]
-----
-
-These options can only be provided as part of a <<esql-from,FROM>> command,
-and they apply to all the indices provided or matched by an index pattern.
-
-The option names and their values are the same as used by the
-<>, however note that the default
-values may differ.
-
-The currently supported options are:
-
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]
-+
-Defaults to `true`.
-
-// unlike "allow-no-indices", "index-ignore-unavailable" includes a default
-// in common-parms.asciidoc, which is different from QL's -- we need to
-// provide the full text here.
-`ignore_unavailable`::
-(Optional, Boolean) If `false`, the request returns an error if it targets a
-missing or closed index.
-+
-Defaults to `true`.
-
-include::{es-ref-dir}/search/search.asciidoc[tag=search-preference]
-
-*Examples*
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/from.csv-spec[tag=convertFromDatetimeWithOptions]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/from.csv-spec[tag=convertFromDatetimeWithOptions-result]
-|===
-
diff --git a/docs/reference/esql/esql-language.asciidoc b/docs/reference/esql/esql-language.asciidoc
index 77f5e79753fdd..a7c0e5e01a867 100644
--- a/docs/reference/esql/esql-language.asciidoc
+++ b/docs/reference/esql/esql-language.asciidoc
@@ -10,16 +10,16 @@ Detailed reference documentation for the {esql} language:
* <<esql-syntax>>
* <<esql-commands>>
* <<esql-functions-operators>>
-* <<esql-index-options>>
* <<esql-multivalued-fields>>
* <<esql-process-data-with-dissect-grok>>
* <<esql-enrich-data>>
+* <<esql-implicit-casting>>
include::esql-syntax.asciidoc[]
include::esql-commands.asciidoc[]
include::esql-functions-operators.asciidoc[]
include::metadata-fields.asciidoc[]
-include::esql-index-options.asciidoc[]
include::multivalued-fields.asciidoc[]
include::esql-process-data-with-dissect-grok.asciidoc[]
include::esql-enrich-data.asciidoc[]
+include::implicit-casting.asciidoc[]
diff --git a/docs/reference/esql/functions/description/cbrt.asciidoc b/docs/reference/esql/functions/description/cbrt.asciidoc
new file mode 100644
index 0000000000000..836dec8a87d69
--- /dev/null
+++ b/docs/reference/esql/functions/description/cbrt.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Returns the cube root of a number. The input can be any numeric value, the return value is always a double. Cube roots of infinities are null.
diff --git a/docs/reference/esql/functions/description/sqrt.asciidoc b/docs/reference/esql/functions/description/sqrt.asciidoc
index 61e4f9b64fcd1..b9f354a33541f 100644
--- a/docs/reference/esql/functions/description/sqrt.asciidoc
+++ b/docs/reference/esql/functions/description/sqrt.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Returns the square root of a number. The input can be any numeric value, the return value is always a double. Square roots of negative numbers and infinites are null.
+Returns the square root of a number. The input can be any numeric value, the return value is always a double. Square roots of negative numbers and infinities are null.
diff --git a/docs/reference/esql/functions/examples/cbrt.asciidoc b/docs/reference/esql/functions/examples/cbrt.asciidoc
new file mode 100644
index 0000000000000..56f1ef0a819e0
--- /dev/null
+++ b/docs/reference/esql/functions/examples/cbrt.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/math.csv-spec[tag=cbrt]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/math.csv-spec[tag=cbrt-result]
+|===
+
diff --git a/docs/reference/esql/functions/kibana/definition/cbrt.json b/docs/reference/esql/functions/kibana/definition/cbrt.json
new file mode 100644
index 0000000000000..600174e17ca0c
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/cbrt.json
@@ -0,0 +1,59 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "eval",
+ "name" : "cbrt",
+ "description" : "Returns the cube root of a number. The input can be any numeric value, the return value is always a double.\nCube roots of infinities are null.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "double",
+ "optional" : false,
+ "description" : "Numeric expression. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Numeric expression. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Numeric expression. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "Numeric expression. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ }
+ ],
+ "examples" : [
+ "ROW d = 1000.0\n| EVAL c = cbrt(d)"
+ ]
+}
diff --git a/docs/reference/esql/functions/kibana/definition/sqrt.json b/docs/reference/esql/functions/kibana/definition/sqrt.json
index e990049a9ce67..7d9111036402d 100644
--- a/docs/reference/esql/functions/kibana/definition/sqrt.json
+++ b/docs/reference/esql/functions/kibana/definition/sqrt.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "sqrt",
- "description" : "Returns the square root of a number. The input can be any numeric value, the return value is always a double.\nSquare roots of negative numbers and infinites are null.",
+ "description" : "Returns the square root of a number. The input can be any numeric value, the return value is always a double.\nSquare roots of negative numbers and infinities are null.",
"signatures" : [
{
"params" : [
diff --git a/docs/reference/esql/functions/kibana/docs/cbrt.md b/docs/reference/esql/functions/kibana/docs/cbrt.md
new file mode 100644
index 0000000000000..50cdad02818e8
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/docs/cbrt.md
@@ -0,0 +1,12 @@
+<!--
+This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+-->
+
+### CBRT
+Returns the cube root of a number. The input can be any numeric value, the return value is always a double.
+Cube roots of infinities are null.
+
+```
+ROW d = 1000.0
+| EVAL c = cbrt(d)
+```
diff --git a/docs/reference/esql/functions/kibana/docs/sqrt.md b/docs/reference/esql/functions/kibana/docs/sqrt.md
index 264abe53921c4..fccec95a4884d 100644
--- a/docs/reference/esql/functions/kibana/docs/sqrt.md
+++ b/docs/reference/esql/functions/kibana/docs/sqrt.md
@@ -4,7 +4,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
### SQRT
Returns the square root of a number. The input can be any numeric value, the return value is always a double.
-Square roots of negative numbers and infinites are null.
+Square roots of negative numbers and infinities are null.
```
ROW d = 100.0
diff --git a/docs/reference/esql/functions/layout/cbrt.asciidoc b/docs/reference/esql/functions/layout/cbrt.asciidoc
new file mode 100644
index 0000000000000..18106f0e6ca35
--- /dev/null
+++ b/docs/reference/esql/functions/layout/cbrt.asciidoc
@@ -0,0 +1,15 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+[discrete]
+[[esql-cbrt]]
+=== `CBRT`
+
+*Syntax*
+
+[.text-center]
+image::esql/functions/signature/cbrt.svg[Embedded,opts=inline]
+
+include::../parameters/cbrt.asciidoc[]
+include::../description/cbrt.asciidoc[]
+include::../types/cbrt.asciidoc[]
+include::../examples/cbrt.asciidoc[]
diff --git a/docs/reference/esql/functions/math-functions.asciidoc b/docs/reference/esql/functions/math-functions.asciidoc
index 9aa5cd2db1927..db907c8d54061 100644
--- a/docs/reference/esql/functions/math-functions.asciidoc
+++ b/docs/reference/esql/functions/math-functions.asciidoc
@@ -13,6 +13,7 @@
* <<esql-asin>>
* <<esql-atan>>
* <<esql-atan2>>
+* <<esql-cbrt>>
* <<esql-ceil>>
* <<esql-cos>>
* <<esql-cosh>>
@@ -37,6 +38,7 @@ include::layout/acos.asciidoc[]
include::layout/asin.asciidoc[]
include::layout/atan.asciidoc[]
include::layout/atan2.asciidoc[]
+include::layout/cbrt.asciidoc[]
include::layout/ceil.asciidoc[]
include::layout/cos.asciidoc[]
include::layout/cosh.asciidoc[]
diff --git a/docs/reference/esql/functions/parameters/cbrt.asciidoc b/docs/reference/esql/functions/parameters/cbrt.asciidoc
new file mode 100644
index 0000000000000..65013f4c21265
--- /dev/null
+++ b/docs/reference/esql/functions/parameters/cbrt.asciidoc
@@ -0,0 +1,6 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Parameters*
+
+`number`::
+Numeric expression. If `null`, the function returns `null`.
diff --git a/docs/reference/esql/functions/signature/cbrt.svg b/docs/reference/esql/functions/signature/cbrt.svg
new file mode 100644
index 0000000000000..ba96c276caaa0
--- /dev/null
+++ b/docs/reference/esql/functions/signature/cbrt.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/type-conversion-functions.asciidoc b/docs/reference/esql/functions/type-conversion-functions.asciidoc
index 2fec7f40bde8b..96c29a776bc2b 100644
--- a/docs/reference/esql/functions/type-conversion-functions.asciidoc
+++ b/docs/reference/esql/functions/type-conversion-functions.asciidoc
@@ -5,6 +5,11 @@
Type conversion functions
++++
+[TIP]
+====
+{esql} supports implicit casting from string literals to certain data types. Refer to <<esql-implicit-casting>> for details.
+====
+
{esql} supports these type conversion functions:
// tag::type_list[]
diff --git a/docs/reference/esql/functions/types/cbrt.asciidoc b/docs/reference/esql/functions/types/cbrt.asciidoc
new file mode 100644
index 0000000000000..7cda278abdb56
--- /dev/null
+++ b/docs/reference/esql/functions/types/cbrt.asciidoc
@@ -0,0 +1,12 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Supported types*
+
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+number | result
+double | double
+integer | double
+long | double
+unsigned_long | double
+|===
diff --git a/docs/reference/esql/implicit-casting.asciidoc b/docs/reference/esql/implicit-casting.asciidoc
new file mode 100644
index 0000000000000..f0c0aa3d82063
--- /dev/null
+++ b/docs/reference/esql/implicit-casting.asciidoc
@@ -0,0 +1,53 @@
+[[esql-implicit-casting]]
+=== {esql} implicit casting
+
+++++
+Implicit casting
+++++
+
+Often users will input `datetime`, `ip`, `version`, or geospatial objects as simple strings in their queries for use in predicates, functions, or expressions. {esql} provides <<esql-type-conversion-functions,type conversion functions>> to explicitly convert these strings into the desired data types.
+
+Without implicit casting, users must explicitly call these `to_X` functions in their queries when string literals don't match the target data types they are assigned or compared to. Here is an example of using `to_datetime` to explicitly perform a data type conversion:
+
+[source.merge.styled,esql]
+----
+FROM employees
+| EVAL dd_ns1=date_diff("day", to_datetime("2023-12-02T11:00:00.00Z"), birth_date)
+| SORT emp_no
+| KEEP dd_ns1
+| LIMIT 1
+----
+
+Implicit casting improves usability by automatically converting string literals to the target data type. This is most useful when the target data type is `datetime`, `ip`, `version`, or a geospatial type, since it is natural to specify these as strings in queries.
+
+With implicit casting, the query above can be written without calling the `to_datetime` function:
+
+[source.merge.styled,esql]
+----
+FROM employees
+| EVAL dd_ns1=date_diff("day", "2023-12-02T11:00:00.00Z", birth_date)
+| SORT emp_no
+| KEEP dd_ns1
+| LIMIT 1
+----
+
+[float]
+=== Implicit casting support
+
+The following table details which {esql} operations support implicit casting for different data types.
+
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+||ScalarFunction|BinaryComparison|ArithmeticOperation|InListPredicate|AggregateFunction
+|DATETIME|Y|Y|Y|Y|N
+|DOUBLE|Y|N|N|N|N
+|LONG|Y|N|N|N|N
+|INTEGER|Y|N|N|N|N
+|IP|Y|Y|Y|Y|N
+|VERSION|Y|Y|Y|Y|N
+|GEO_POINT|Y|N|N|N|N
+|GEO_SHAPE|Y|N|N|N|N
+|CARTESIAN_POINT|Y|N|N|N|N
+|CARTESIAN_SHAPE|Y|N|N|N|N
+|BOOLEAN|Y|Y|Y|Y|N
+|===
diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc
index 427562a8c0dbb..d81c46530e089 100644
--- a/docs/reference/esql/source-commands/from.asciidoc
+++ b/docs/reference/esql/source-commands/from.asciidoc
@@ -6,7 +6,7 @@
[source,esql]
----
-FROM index_pattern [METADATA fields] [OPTIONS options]
+FROM index_pattern [METADATA fields]
----
*Parameters*
@@ -17,10 +17,6 @@ A list of indices, data streams or aliases. Supports wildcards and date math.
`fields`::
A comma-separated list of <<esql-metadata-fields,metadata fields>> to retrieve.
-`options`::
-A comma-separated list of <<esql-index-options,index options>> to configure
-data access.
-
*Description*
The `FROM` source command returns a table with data from a data stream, index,
@@ -86,11 +82,3 @@ Use the optional `METADATA` directive to enable <<esql-metadata-fields,metadata fields>>.
-This directive must follow `METADATA`, if both are specified:
-
-[source,esql]
-----
-FROM employees* METADATA _index OPTIONS "ignore_unavailable"="true"
-----
diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc
index fbe017619048f..348a9e7f99e78 100644
--- a/docs/reference/ilm/apis/explain.asciidoc
+++ b/docs/reference/ilm/apis/explain.asciidoc
@@ -49,7 +49,7 @@ or `_all`.
{ilm-init} and are in an error state, either due to encountering an error while
executing the policy, or attempting to use a policy that does not exist.
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
[[ilm-explain-lifecycle-example]]
==== {api-examples-title}
diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc
index 20e0df9f3cb92..711eccc298df1 100644
--- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc
+++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc
@@ -40,7 +40,7 @@ target. Supports wildcards (`*`). To target all data streams and indices, use
[[ilm-remove-policy-query-params]]
==== {api-query-parms-title}
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
[[ilm-remove-policy-example]]
==== {api-examples-title}
diff --git a/docs/reference/indices/delete-component-template.asciidoc b/docs/reference/indices/delete-component-template.asciidoc
index 0ca6560f17ccb..065a4adb90023 100644
--- a/docs/reference/indices/delete-component-template.asciidoc
+++ b/docs/reference/indices/delete-component-template.asciidoc
@@ -58,4 +58,4 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=component-template]
[[delete-component-template-api-query-params]]
==== {api-query-parms-title}
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
diff --git a/docs/reference/indices/delete-index-template-v1.asciidoc b/docs/reference/indices/delete-index-template-v1.asciidoc
index ca0b5a0e726bd..98b1e2fb255f1 100644
--- a/docs/reference/indices/delete-index-template-v1.asciidoc
+++ b/docs/reference/indices/delete-index-template-v1.asciidoc
@@ -55,4 +55,4 @@ expressions are supported.
[[delete-template-api-v1-query-params]]
==== {api-query-parms-title}
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
diff --git a/docs/reference/indices/delete-index-template.asciidoc b/docs/reference/indices/delete-index-template.asciidoc
index 02396310daff4..b828e4a536b71 100644
--- a/docs/reference/indices/delete-index-template.asciidoc
+++ b/docs/reference/indices/delete-index-template.asciidoc
@@ -61,4 +61,4 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=index-template]
[[delete-template-api-query-params]]
==== {api-query-parms-title}
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
diff --git a/docs/reference/indices/field-usage-stats.asciidoc b/docs/reference/indices/field-usage-stats.asciidoc
index 9fd1d9e59eb33..a4856092834e5 100644
--- a/docs/reference/indices/field-usage-stats.asciidoc
+++ b/docs/reference/indices/field-usage-stats.asciidoc
@@ -46,8 +46,6 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailabl
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards]
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
-
`fields`::
+
--
diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc
index 1f73cd08401ee..c256b30060bf6 100644
--- a/docs/reference/inference/put-inference.asciidoc
+++ b/docs/reference/inference/put-inference.asciidoc
@@ -268,7 +268,7 @@ used for abuse detection.
=====
`return_documents`::
(Optional, boolean)
-For `cohere` service only. Specify whether to return doc text within the
+For `cohere` service only. Specify whether to return doc text within the
results.
`top_n`::
@@ -307,16 +307,6 @@ For `openai` and `azureopenai` service only. Specifies the user issuing the
request, which can be used for abuse detection.
=====
-+
-.`task_settings` for the `completion` task type
-[%collapsible%closed]
-=====
-`user`:::
-(optional, string)
-For `openai` service only. Specifies the user issuing the request, which can be used for abuse detection.
-=====
-
-
[discrete]
[[put-inference-api-example]]
==== {api-examples-title}
@@ -351,11 +341,11 @@ The following example shows how to create an {infer} endpoint called
[source,console]
------------------------------------------------------------
-PUT _inference/rerank/cohere-rerank
+PUT _inference/rerank/cohere-rerank
{
"service": "cohere",
"service_settings": {
- "api_key": "",
+ "api_key": "",
"model_id": "rerank-english-v3.0"
},
"task_settings": {
@@ -366,7 +356,7 @@ PUT _inference/rerank/cohere-rerank
------------------------------------------------------------
// TEST[skip:TBD]
-For more examples, also review the
+For more examples, also review the
https://docs.cohere.com/docs/elasticsearch-and-cohere#rerank-search-results-with-cohere-and-elasticsearch[Cohere documentation].
diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc
index 12e7a5f10135c..a8c6a8f647c74 100644
--- a/docs/reference/ingest/processors/geoip.asciidoc
+++ b/docs/reference/ingest/processors/geoip.asciidoc
@@ -59,10 +59,18 @@ in `properties`.
* If the GeoIP2 Anonymous IP database is used, then the following fields may be added under the `target_field`: `ip`,
`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, and `residential_proxy`. The fields actually added
depend on what has been found and which properties were configured in `properties`.
+* If the GeoIP2 Connection Type database is used, then the following fields may be added under the `target_field`: `ip`, and
+`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`.
+* If the GeoIP2 Domain database is used, then the following fields may be added under the `target_field`: `ip`, and `domain`.
+The fields actually added depend on what has been found and which properties were configured in `properties`.
+* If the GeoIP2 ISP database is used, then the following fields may be added under the `target_field`: `ip`, `asn`,
+`organization_name`, `network`, `isp`, `isp_organization`, `mobile_country_code`, and `mobile_network_code`. The fields actually added
+depend on what has been found and which properties were configured in `properties`.
* If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`,
`country_iso_code`, `country_name`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, `location`, `asn`,
-`organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, and `residential_proxy`.
-The fields actually added depend on what has been found and which properties were configured in `properties`.
+`organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, `residential_proxy`,
+`domain`, `isp`, `isp_organization`, `mobile_country_code`, `mobile_network_code`, `user_type`, and `connection_type`. The fields
+actually added depend on what has been found and which properties were configured in `properties`.
Here is an example that uses the default city database and adds the geographical information to the `geoip` field based on the `ip` field:
diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc
index a2a397c4efe65..15414dde86e52 100644
--- a/docs/reference/rest-api/common-parms.asciidoc
+++ b/docs/reference/rest-api/common-parms.asciidoc
@@ -1232,6 +1232,15 @@ indicate that it was not completely acknowledged. Defaults to `30s`.
Can also be set to `-1` to indicate that the request should never timeout.
end::timeoutparms[]
+tag::timeout-nodes-request[]
+`timeout`::
+(Optional, <<time-units, time units>>)
+Period to wait for each node to respond. If a node does not respond before its
+timeout expires, the response does not include its information. However, timed out
+nodes are included in the response's `_nodes.failed` property. Defaults to no
+timeout.
+end::timeout-nodes-request[]
+
tag::transform-id[]
Identifier for the {transform}.
end::transform-id[]
diff --git a/docs/reference/rest-api/watcher/start.asciidoc b/docs/reference/rest-api/watcher/start.asciidoc
index 565ef60160a9d..b153410ed2901 100644
--- a/docs/reference/rest-api/watcher/start.asciidoc
+++ b/docs/reference/rest-api/watcher/start.asciidoc
@@ -24,10 +24,8 @@ information, see <>.
//[[watcher-api-start-path-params]]
//==== {api-path-parms-title}
-[[watcher-api-start-query-params]]
-==== {api-query-parms-title}
-
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
+//[[watcher-api-start-query-params]]
+//==== {api-query-parms-title}
//[[watcher-api-start-request-body]]
//==== {api-request-body-title}
diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc
index c47ccd60afc05..590df272cc89e 100644
--- a/docs/reference/search/retriever.asciidoc
+++ b/docs/reference/search/retriever.asciidoc
@@ -12,6 +12,11 @@ allows for complex behavior to be depicted in a tree-like structure, called
the retriever tree, to better clarify the order of operations that occur
during a search.
+[TIP]
+====
+Refer to <<retrievers-overview>> for a high-level overview of the retrievers abstraction.
+====
+
The following retrievers are available:
`standard`::
diff --git a/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc b/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc
index 50314b6d36f28..62faceb99d4fc 100644
--- a/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc
+++ b/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc
@@ -30,10 +30,8 @@ For more information, see <>.
For example, `nodeId1,nodeId2`. For node selection options, see
<>.
-[[searchable-snapshots-api-cache-stats-query-params]]
-==== {api-query-parms-title}
-
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
+//[[searchable-snapshots-api-cache-stats-query-params]]
+//==== {api-query-parms-title}
[role="child_attributes"]
[[searchable-snapshots-api-cache-stats-response-body]]
diff --git a/docs/reference/shutdown/apis/shutdown-delete.asciidoc b/docs/reference/shutdown/apis/shutdown-delete.asciidoc
index 133539adfaa38..4d7f30c3a1e48 100644
--- a/docs/reference/shutdown/apis/shutdown-delete.asciidoc
+++ b/docs/reference/shutdown/apis/shutdown-delete.asciidoc
@@ -40,7 +40,7 @@ The ID of a node that you prepared for shut down.
[[delete-shutdown-api-params]]
==== {api-query-parms-title}
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
[[delete-shutdown-api-example]]
==== {api-examples-title}
diff --git a/docs/reference/shutdown/apis/shutdown-get.asciidoc b/docs/reference/shutdown/apis/shutdown-get.asciidoc
index 264a8dd7be181..5feac28353ab5 100644
--- a/docs/reference/shutdown/apis/shutdown-get.asciidoc
+++ b/docs/reference/shutdown/apis/shutdown-get.asciidoc
@@ -37,10 +37,8 @@ Use to monitor the shut down process after calling <<put-shutdown>>.
The ID of a node that is being prepared for shutdown.
If no ID is specified, returns the status of all nodes being prepared for shutdown.
-[[get-shutdown-api-params]]
-==== {api-query-parms-title}
-
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+//[[get-shutdown-api-params]]
+//==== {api-query-parms-title}
[[get-shutdown-api-example]]
==== {api-examples-title}
diff --git a/docs/reference/shutdown/apis/shutdown-put.asciidoc b/docs/reference/shutdown/apis/shutdown-put.asciidoc
index 236367f886ef9..344dd8fa36717 100644
--- a/docs/reference/shutdown/apis/shutdown-put.asciidoc
+++ b/docs/reference/shutdown/apis/shutdown-put.asciidoc
@@ -50,7 +50,7 @@ No error is thrown if you specify an invalid node ID.
[[put-shutdown-api-params]]
==== {api-query-parms-title}
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
[role="child_attributes"]
[[put-shutdown-api-request-body]]
diff --git a/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc
index 2931faf49841d..4301fea642523 100644
--- a/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc
+++ b/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc
@@ -51,9 +51,4 @@ supported.
[[delete-snapshot-repo-api-query-params]]
==== {api-query-parms-title}
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
-
-`timeout`::
-(Optional, <>) Specifies the period of time to wait for
-a response. If no response is received before the timeout expires, the request
-fails and returns an error. Defaults to `30s`.
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
diff --git a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc
index c3e9c0a0904be..0d3b5586da869 100644
--- a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc
+++ b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc
@@ -52,12 +52,7 @@ IMPORTANT: Several options for this API can be specified using a query parameter
or a request body parameter. If both parameters are specified, only the query
parameter is used.
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
-
-`timeout`::
-(Optional, <>) Specifies the period of time to wait for
-a response. If no response is received before the timeout expires, the request
-fails and returns an error. Defaults to `30s`.
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
`verify`::
(Optional, Boolean)
diff --git a/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc
index 9d14e8a426e32..dd845663be8d7 100644
--- a/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc
+++ b/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc
@@ -47,12 +47,7 @@ Name of the snapshot repository to verify.
[[verify-snapshot-repo-api-query-params]]
==== {api-query-parms-title}
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
-
-`timeout`::
-(Optional, <>) Specifies the period of time to wait for
-a response. If no response is received before the timeout expires, the request
-fails and returns an error. Defaults to `30s`.
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
[role="child_attributes"]
[[verify-snapshot-repo-api-response-body]]
diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle
index 8acdb0f156af1..b9d38551a2674 100644
--- a/modules/data-streams/build.gradle
+++ b/modules/data-streams/build.gradle
@@ -20,6 +20,7 @@ restResources {
dependencies {
testImplementation project(path: ':test:test-clusters')
+ internalClusterTestImplementation project(":modules:mapper-extras")
}
tasks.named('yamlRestTest') {
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java
index 2b1a8e1c0e318..f79eea8676b3e 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java
@@ -1281,7 +1281,7 @@ public void testSearchAllResolvesDataStreams() throws Exception {
public void testGetDataStream() throws Exception {
Settings settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, maximumNumberOfReplicas() + 2).build();
DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build();
- putComposableIndexTemplate("template_for_foo", null, List.of("metrics-foo*"), settings, null, null, lifecycle);
+ putComposableIndexTemplate("template_for_foo", null, List.of("metrics-foo*"), settings, null, null, lifecycle, false);
int numDocsFoo = randomIntBetween(2, 16);
indexDocs("metrics-foo", numDocsFoo);
@@ -1642,7 +1642,8 @@ public void testCreateDataStreamWithSameNameAsDataStreamAlias() throws Exception
null,
null,
Map.of("my-alias", AliasMetadata.builder("my-alias").build()),
- null
+ null,
+ false
);
var request = new CreateDataStreamAction.Request("my-ds");
assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet());
@@ -1675,7 +1676,8 @@ public void testCreateDataStreamAliasWithSameNameAsIndexAlias() throws Exception
null,
null,
Map.of("logs", AliasMetadata.builder("logs").build()),
- null
+ null,
+ false
);
var request = new CreateDataStreamAction.Request("logs-es");
@@ -1712,7 +1714,8 @@ public void testCreateDataStreamAliasWithSameNameAsIndex() throws Exception {
null,
null,
Map.of("logs", AliasMetadata.builder("logs").build()),
- null
+ null,
+ false
)
);
assertThat(
@@ -1902,7 +1905,11 @@ static void verifyDocs(String dataStream, long expectedNumHits, long minGenerati
}
public static void putComposableIndexTemplate(String id, List<String> patterns) throws IOException {
- putComposableIndexTemplate(id, null, patterns, null, null);
+ putComposableIndexTemplate(id, patterns, false);
+ }
+
+ public static void putComposableIndexTemplate(String id, List<String> patterns, boolean withFailureStore) throws IOException {
+ putComposableIndexTemplate(id, null, patterns, null, null, null, null, withFailureStore);
}
public void testPartitionedTemplate() throws IOException {
@@ -2277,7 +2284,7 @@ static void putComposableIndexTemplate(
@Nullable Settings settings,
@Nullable Map<String, Object> metadata
) throws IOException {
- putComposableIndexTemplate(id, mappings, patterns, settings, metadata, null, null);
+ putComposableIndexTemplate(id, mappings, patterns, settings, metadata, null, null, false);
}
static void putComposableIndexTemplate(
@@ -2287,7 +2294,8 @@ static void putComposableIndexTemplate(
@Nullable Settings settings,
@Nullable Map<String, Object> metadata,
@Nullable Map<String, AliasMetadata> aliases,
- @Nullable DataStreamLifecycle lifecycle
+ @Nullable DataStreamLifecycle lifecycle,
+ boolean withFailureStore
) throws IOException {
TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id);
request.indexTemplate(
@@ -2295,7 +2303,7 @@ static void putComposableIndexTemplate(
.indexPatterns(patterns)
.template(new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), aliases, lifecycle))
.metadata(metadata)
- .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
+ .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, withFailureStore))
.build()
);
client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet();
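
For orientation, a minimal sketch of how a test can use the widened helper above to create a failure-store-enabled template; the template id and index pattern are illustrative, everything else mirrors the signatures in this diff:

-------------------------------------------------
// Short form: delegates to the eight-argument overload, passing nulls for the
// optional arguments and `true` for the new withFailureStore flag.
putComposableIndexTemplate("template_with_fs", List.of("logs-fs*"), true);

// Equivalent long form, matching the call shape used in DataStreamsSnapshotsIT.setup():
putComposableIndexTemplate("template_with_fs", null, List.of("logs-fs*"), null, null, null, null, true);
-------------------------------------------------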
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java
index da782cfd86ce2..1bd4d54b9c804 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java
@@ -36,6 +36,7 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
@@ -81,13 +82,17 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
private String dsBackingIndexName;
private String otherDsBackingIndexName;
+ private String fsBackingIndexName;
+ private String fsFailureIndexName;
private String ds2BackingIndexName;
private String otherDs2BackingIndexName;
+ private String fs2BackingIndexName;
+ private String fs2FailureIndexName;
private String id;
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
- return List.of(MockRepository.Plugin.class, DataStreamsPlugin.class);
+ return List.of(MockRepository.Plugin.class, DataStreamsPlugin.class, MapperExtrasPlugin.class);
}
@Before
@@ -97,6 +102,18 @@ public void setup() throws Exception {
createRepository(REPO, "fs", location);
DataStreamIT.putComposableIndexTemplate("t1", List.of("ds", "other-ds"));
+ DataStreamIT.putComposableIndexTemplate("t2", """
+ {
+ "properties": {
+ "@timestamp": {
+ "type": "date",
+ "format": "date_optional_time||epoch_millis"
+ },
+ "flag": {
+ "type": "boolean"
+ }
+ }
+ }""", List.of("with-fs"), null, null, null, null, true);
CreateDataStreamAction.Request request = new CreateDataStreamAction.Request("ds");
AcknowledgedResponse response = client.execute(CreateDataStreamAction.INSTANCE, request).get();
@@ -106,15 +123,30 @@ public void setup() throws Exception {
response = client.execute(CreateDataStreamAction.INSTANCE, request).get();
assertTrue(response.isAcknowledged());
+ request = new CreateDataStreamAction.Request("with-fs");
+ response = client.execute(CreateDataStreamAction.INSTANCE, request).get();
+ assertTrue(response.isAcknowledged());
+
// Resolve backing index names after data streams have been created:
// (these names have a date component, and running around midnight could lead to test failures otherwise)
GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" });
GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet();
dsBackingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName();
otherDsBackingIndexName = getDataStreamResponse.getDataStreams().get(1).getDataStream().getIndices().get(0).getName();
+ fsBackingIndexName = getDataStreamResponse.getDataStreams().get(2).getDataStream().getIndices().get(0).getName();
+ fsFailureIndexName = getDataStreamResponse.getDataStreams()
+ .get(2)
+ .getDataStream()
+ .getFailureIndices()
+ .getIndices()
+ .get(0)
+ .getName();
+
// Will be used in some tests, to test renaming while restoring a snapshot:
ds2BackingIndexName = dsBackingIndexName.replace("-ds-", "-ds2-");
otherDs2BackingIndexName = otherDsBackingIndexName.replace("-other-ds-", "-other-ds2-");
+ fs2BackingIndexName = fsBackingIndexName.replace("-with-fs-", "-with-fs2-");
+ fs2FailureIndexName = fsFailureIndexName.replace("-with-fs-", "-with-fs2-");
DocWriteResponse indexResponse = client.prepareIndex("ds")
.setOpType(DocWriteRequest.OpType.CREATE)
@@ -232,12 +264,16 @@ public void testSnapshotAndRestoreAllDataStreamsInPlace() throws Exception {
GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).get();
assertThat(
ds.getDataStreams().stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()),
- contains(equalTo("ds"), equalTo("other-ds"))
+ contains(equalTo("ds"), equalTo("other-ds"), equalTo("with-fs"))
);
List<Index> backingIndices = ds.getDataStreams().get(0).getDataStream().getIndices();
assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(dsBackingIndexName));
backingIndices = ds.getDataStreams().get(1).getDataStream().getIndices();
assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(otherDsBackingIndexName));
+ backingIndices = ds.getDataStreams().get(2).getDataStream().getIndices();
+ assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(fsBackingIndexName));
+ List<Index> failureIndices = ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices();
+ assertThat(failureIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(fsFailureIndexName));
}
public void testSnapshotAndRestoreInPlace() {
@@ -295,13 +331,72 @@ public void testSnapshotAndRestoreInPlace() {
// The backing index created as part of rollover should still exist (but just not part of the data stream)
assertThat(indexExists(backingIndexAfterSnapshot), is(true));
- // An additional rollover should create a new backing index (3th generation) and leave .ds-ds-...-2 index as is:
+ // An additional rollover should create a new backing index (3rd generation) and leave .ds-ds-...-2 index as is:
rolloverRequest = new RolloverRequest("ds", null);
rolloverResponse = client.admin().indices().rolloverIndex(rolloverRequest).actionGet();
assertThat(rolloverResponse.isRolledOver(), is(true));
assertThat(rolloverResponse.getNewIndex(), equalTo(DataStream.getDefaultBackingIndexName("ds", 3)));
}
+ public void testFailureStoreSnapshotAndRestore() throws Exception {
+ CreateSnapshotResponse createSnapshotResponse = client.admin()
+ .cluster()
+ .prepareCreateSnapshot(REPO, SNAPSHOT)
+ .setWaitForCompletion(true)
+ .setIndices("with-fs")
+ .setIncludeGlobalState(false)
+ .get();
+
+ RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
+ assertEquals(RestStatus.OK, status);
+
+ assertThat(getSnapshot(REPO, SNAPSHOT).indices(), containsInAnyOrder(fsBackingIndexName, fsFailureIndexName));
+
+ assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("with-fs")));
+
+ {
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
+ .cluster()
+ .prepareRestoreSnapshot(REPO, SNAPSHOT)
+ .setWaitForCompletion(true)
+ .setIndices("with-fs")
+ .get();
+
+ assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards());
+
+ GetDataStreamAction.Response ds = client.execute(
+ GetDataStreamAction.INSTANCE,
+ new GetDataStreamAction.Request(new String[] { "with-fs" })
+ ).get();
+ assertEquals(1, ds.getDataStreams().size());
+ assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size());
+ assertEquals(fsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName());
+ assertEquals(fsFailureIndexName, ds.getDataStreams().get(0).getDataStream().getFailureIndices().getIndices().get(0).getName());
+ }
+ {
+ // With rename pattern
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
+ .cluster()
+ .prepareRestoreSnapshot(REPO, SNAPSHOT)
+ .setWaitForCompletion(true)
+ .setIndices("with-fs")
+ .setRenamePattern("-fs")
+ .setRenameReplacement("-fs2")
+ .get();
+
+ assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards());
+
+ GetDataStreamAction.Response ds = client.execute(
+ GetDataStreamAction.INSTANCE,
+ new GetDataStreamAction.Request(new String[] { "with-fs2" })
+ ).get();
+ assertEquals(1, ds.getDataStreams().size());
+ assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size());
+ assertEquals(fs2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName());
+ assertEquals(fs2FailureIndexName, ds.getDataStreams().get(0).getDataStream().getFailureIndices().getIndices().get(0).getName());
+ }
+ }
+
public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exception {
DocWriteResponse indexResponse = client.prepareIndex("other-ds")
.setOpType(DocWriteRequest.OpType.CREATE)
@@ -338,10 +433,13 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio
if (filterDuringSnapshotting) {
assertThat(getSnapshot(REPO, SNAPSHOT).indices(), containsInAnyOrder(backingIndexName));
} else {
- assertThat(getSnapshot(REPO, SNAPSHOT).indices(), containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName));
+ assertThat(
+ getSnapshot(REPO, SNAPSHOT).indices(),
+ containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName, fsBackingIndexName, fsFailureIndexName)
+ );
}
- assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" })).get());
+ assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).get());
assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN));
RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(REPO, SNAPSHOT);
@@ -395,7 +493,10 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception {
RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
assertEquals(RestStatus.OK, status);
- assertThat(getSnapshot(REPO, SNAPSHOT).indices(), containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName));
+ assertThat(
+ getSnapshot(REPO, SNAPSHOT).indices(),
+ containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName, fsBackingIndexName, fsFailureIndexName)
+ );
assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" })).get());
assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN));
@@ -403,7 +504,7 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception {
var restoreSnapshotRequest = new RestoreSnapshotRequest(REPO, SNAPSHOT).waitForCompletion(true).includeGlobalState(false);
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().restoreSnapshot(restoreSnapshotRequest).actionGet();
- assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards());
+ assertEquals(4, restoreSnapshotResponse.getRestoreInfo().successfulShards());
assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap());
assertResponse(client.prepareSearch("ds"), response -> {
@@ -416,10 +517,10 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception {
GetDataStreamAction.INSTANCE,
new GetDataStreamAction.Request(new String[] { "*" })
).get();
- assertEquals(2, ds.getDataStreams().size());
+ assertEquals(3, ds.getDataStreams().size());
assertThat(
ds.getDataStreams().stream().map(i -> i.getDataStream().getName()).collect(Collectors.toList()),
- containsInAnyOrder("ds", "other-ds")
+ containsInAnyOrder("ds", "other-ds", "with-fs")
);
GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet();
@@ -451,14 +552,17 @@ public void testSnapshotAndRestoreAll() throws Exception {
RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
assertEquals(RestStatus.OK, status);
- assertThat(getSnapshot(REPO, SNAPSHOT).indices(), containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName));
+ assertThat(
+ getSnapshot(REPO, SNAPSHOT).indices(),
+ containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName, fsBackingIndexName, fsFailureIndexName)
+ );
- assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" })).get());
+ assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).get());
assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN));
var restoreSnapshotRequest = new RestoreSnapshotRequest(REPO, SNAPSHOT).waitForCompletion(true).includeGlobalState(false);
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().restoreSnapshot(restoreSnapshotRequest).actionGet();
- assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards());
+ assertEquals(4, restoreSnapshotResponse.getRestoreInfo().successfulShards());
assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap());
assertResponse(client.prepareSearch("ds"), response -> {
@@ -471,11 +575,15 @@ public void testSnapshotAndRestoreAll() throws Exception {
GetDataStreamAction.INSTANCE,
new GetDataStreamAction.Request(new String[] { "*" })
).get();
- assertEquals(2, ds.getDataStreams().size());
+ assertEquals(3, ds.getDataStreams().size());
assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size());
assertEquals(dsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName());
assertEquals(1, ds.getDataStreams().get(1).getDataStream().getIndices().size());
assertEquals(otherDsBackingIndexName, ds.getDataStreams().get(1).getDataStream().getIndices().get(0).getName());
+ assertEquals(1, ds.getDataStreams().get(2).getDataStream().getIndices().size());
+ assertEquals(fsBackingIndexName, ds.getDataStreams().get(2).getDataStream().getIndices().get(0).getName());
+ assertEquals(1, ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices().size());
+ assertEquals(fsFailureIndexName, ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices().get(0).getName());
GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet();
assertThat(getAliasesResponse.getDataStreamAliases().keySet(), containsInAnyOrder("ds", "other-ds"));
@@ -507,16 +615,19 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception {
RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
assertEquals(RestStatus.OK, status);
- assertThat(getSnapshot(REPO, SNAPSHOT).indices(), containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName));
+ assertThat(
+ getSnapshot(REPO, SNAPSHOT).indices(),
+ containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName, fsBackingIndexName, fsFailureIndexName)
+ );
- assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" })).get());
+ assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).get());
assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN));
var restoreSnapshotRequest = new RestoreSnapshotRequest(REPO, SNAPSHOT).waitForCompletion(true)
.includeGlobalState(false)
.includeAliases(false);
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().restoreSnapshot(restoreSnapshotRequest).actionGet();
- assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards());
+ assertEquals(4, restoreSnapshotResponse.getRestoreInfo().successfulShards());
assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap());
assertResponse(client.prepareSearch("ds"), response -> {
@@ -529,11 +640,15 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception {
GetDataStreamAction.INSTANCE,
new GetDataStreamAction.Request(new String[] { "*" })
).get();
- assertEquals(2, ds.getDataStreams().size());
+ assertEquals(3, ds.getDataStreams().size());
assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size());
assertEquals(dsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName());
assertEquals(1, ds.getDataStreams().get(1).getDataStream().getIndices().size());
assertEquals(otherDsBackingIndexName, ds.getDataStreams().get(1).getDataStream().getIndices().get(0).getName());
+ assertEquals(1, ds.getDataStreams().get(2).getDataStream().getIndices().size());
+ assertEquals(fsBackingIndexName, ds.getDataStreams().get(2).getDataStream().getIndices().get(0).getName());
+ assertEquals(1, ds.getDataStreams().get(2).getDataStream().getIndices().size());
+ assertEquals(fsFailureIndexName, ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices().get(0).getName());
GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("*")).actionGet();
assertThat(getAliasesResponse.getDataStreamAliases(), anEmptyMap());
@@ -930,7 +1045,32 @@ public void testPartialRestoreSnapshotThatIncludesDataStream() {
.prepareRestoreSnapshot(REPO, snapshot)
.setIndices(indexWithoutDataStream)
.setWaitForCompletion(true)
- .setRestoreGlobalState(randomBoolean())
+ .setRestoreGlobalState(false)
+ .get()
+ .getRestoreInfo();
+ assertThat(restoreInfo.failedShards(), is(0));
+ assertThat(restoreInfo.successfulShards(), is(1));
+ }
+
+ /**
+ * This test is a copy of the {@link #testPartialRestoreSnapshotThatIncludesDataStream()} the only difference
+ * is that one include the global state and one doesn't. In general this shouldn't matter that's why it used to be
+ * a random parameter of the test, but because of #107515 it fails when we include the global state. Keep them
+ * separate until this is fixed.
+ */
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107515")
+ public void testPartialRestoreSnapshotThatIncludesDataStreamWithGlobalState() {
+ final String snapshot = "test-snapshot";
+ final String indexWithoutDataStream = "test-idx-no-ds";
+ createIndexWithContent(indexWithoutDataStream);
+ createFullSnapshot(REPO, snapshot);
+ assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream));
+ RestoreInfo restoreInfo = client.admin()
+ .cluster()
+ .prepareRestoreSnapshot(REPO, snapshot)
+ .setIndices(indexWithoutDataStream)
+ .setWaitForCompletion(true)
+ .setRestoreGlobalState(true)
.get()
.getRestoreInfo();
assertThat(restoreInfo.failedShards(), is(0));
@@ -1027,7 +1167,32 @@ public void testExcludeDSFromSnapshotWhenExcludingItsIndices() {
.cluster()
.prepareRestoreSnapshot(REPO, snapshot)
.setWaitForCompletion(true)
- .setRestoreGlobalState(randomBoolean())
+ .setRestoreGlobalState(false)
+ .get()
+ .getRestoreInfo();
+ assertThat(restoreInfo.failedShards(), is(0));
+ assertThat(restoreInfo.successfulShards(), is(1));
+ }
+
+ /**
+ * This test is a copy of {@link #testExcludeDSFromSnapshotWhenExcludingItsIndices()}; the only difference
+ * is that one includes the global state and the other doesn't. In general this shouldn't matter, which is why it
+ * used to be a random parameter of the test, but because of #107515 it fails when we include the global state.
+ * Keep them separate until this is fixed.
+ */
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107515")
+ public void testExcludeDSFromSnapshotWhenExcludingItsIndicesWithGlobalState() {
+ final String snapshot = "test-snapshot";
+ final String indexWithoutDataStream = "test-idx-no-ds";
+ createIndexWithContent(indexWithoutDataStream);
+ final SnapshotInfo snapshotInfo = createSnapshot(REPO, snapshot, List.of("*", "-.*"));
+ assertThat(snapshotInfo.dataStreams(), empty());
+ assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream));
+ RestoreInfo restoreInfo = client.admin()
+ .cluster()
+ .prepareRestoreSnapshot(REPO, snapshot)
+ .setWaitForCompletion(true)
+ .setRestoreGlobalState(true)
.get()
.getRestoreInfo();
assertThat(restoreInfo.failedShards(), is(0));
@@ -1051,7 +1216,7 @@ public void testRestoreSnapshotFully() throws Exception {
assertEquals(RestStatus.OK, restoreSnapshotResponse.status());
GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(new String[] { "*" });
- assertThat(client.execute(GetDataStreamAction.INSTANCE, getRequest).get().getDataStreams(), hasSize(2));
+ assertThat(client.execute(GetDataStreamAction.INSTANCE, getRequest).get().getDataStreams(), hasSize(3));
assertNotNull(client.admin().indices().prepareGetIndex().setIndices(indexName).get());
}
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java
index d43dad87a6067..7712be94b4326 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java
@@ -11,6 +11,7 @@
import org.elasticsearch.action.datastreams.CreateDataStreamAction;
import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction;
import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.datastreams.DataStreamsPlugin;
@@ -229,6 +230,8 @@ public void testDeleteLifecycle() throws Exception {
// Remove lifecycle from concrete data stream
{
DeleteDataStreamLifecycleAction.Request deleteDataLifecycleRequest = new DeleteDataStreamLifecycleAction.Request(
+ TimeValue.THIRTY_SECONDS,
+ AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
new String[] { "with-lifecycle-1" }
);
assertThat(
@@ -254,6 +257,8 @@ public void testDeleteLifecycle() throws Exception {
// Remove lifecycle from all data streams
{
DeleteDataStreamLifecycleAction.Request deleteDataLifecycleRequest = new DeleteDataStreamLifecycleAction.Request(
+ TimeValue.THIRTY_SECONDS,
+ AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
new String[] { "*" }
);
assertThat(
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java
index 7252d31d838c5..97c6c1ddff977 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java
@@ -203,6 +203,7 @@ public void testSystemDataStreamRetention() throws Exception {
client().execute(
PutDataStreamGlobalRetentionAction.INSTANCE,
new PutDataStreamGlobalRetentionAction.Request(
+ TimeValue.THIRTY_SECONDS,
TimeValue.timeValueSeconds(globalRetentionSeconds),
TimeValue.timeValueSeconds(globalRetentionSeconds)
)
@@ -290,7 +291,10 @@ public void testSystemDataStreamRetention() throws Exception {
client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(SYSTEM_DATA_STREAM_NAME)).actionGet();
} finally {
- client().execute(DeleteDataStreamGlobalRetentionAction.INSTANCE, new DeleteDataStreamGlobalRetentionAction.Request());
+ client().execute(
+ DeleteDataStreamGlobalRetentionAction.INSTANCE,
+ new DeleteDataStreamGlobalRetentionAction.Request(TimeValue.THIRTY_SECONDS)
+ );
}
} finally {
dataStreamLifecycleServices.forEach(dataStreamLifecycleService -> dataStreamLifecycleService.setNowSupplier(clock::millis));
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java
index 2723637b2959b..35ee41fca18e8 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java
@@ -213,6 +213,7 @@ public void testSystemExplainLifecycle() throws Exception {
client().execute(
PutDataStreamGlobalRetentionAction.INSTANCE,
new PutDataStreamGlobalRetentionAction.Request(
+ TimeValue.THIRTY_SECONDS,
TimeValue.timeValueSeconds(globalRetentionSeconds),
TimeValue.timeValueSeconds(globalRetentionSeconds)
)
@@ -260,7 +261,10 @@ public void testSystemExplainLifecycle() throws Exception {
);
}
} finally {
- client().execute(DeleteDataStreamGlobalRetentionAction.INSTANCE, new DeleteDataStreamGlobalRetentionAction.Request());
+ client().execute(
+ DeleteDataStreamGlobalRetentionAction.INSTANCE,
+ new DeleteDataStreamGlobalRetentionAction.Request(TimeValue.THIRTY_SECONDS)
+ );
}
}
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java
index e3cdd6a8c14d9..92cb855b7cb4e 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java
@@ -24,6 +24,7 @@
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.core.TimeValue;
import org.elasticsearch.datastreams.lifecycle.UpdateDataStreamGlobalRetentionService;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.tasks.Task;
@@ -64,8 +65,8 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(dryRun);
}
- public Request() {
- super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
+ public Request(TimeValue masterNodeTimeout) {
+ super(masterNodeTimeout);
}
public boolean dryRun() {
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java
index 3bd100a106dd6..70f822ddee72a 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java
@@ -15,6 +15,7 @@
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.core.TimeValue;
import java.io.IOException;
import java.util.Arrays;
@@ -47,8 +48,8 @@ public void writeTo(StreamOutput out) throws IOException {
indicesOptions.writeIndicesOptions(out);
}
- public Request(String[] names) {
- super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT);
+ public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String[] names) {
+ super(masterNodeTimeout, ackTimeout);
this.names = names;
}
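
With the trappy implicit default removed, callers now pass both timeouts explicitly. A minimal sketch, assuming the same values the updated tests above use; the data stream name is illustrative:

-------------------------------------------------
// Explicit master node timeout and ack timeout, as in CrudDataStreamLifecycleIT.
var request = new DeleteDataStreamLifecycleAction.Request(
    TimeValue.THIRTY_SECONDS,                // master node timeout
    AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, // ack timeout
    new String[] { "my-data-stream" }
);
-------------------------------------------------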
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java
index 5816823ed710a..1d1064dd42b1a 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java
@@ -47,10 +47,6 @@ private GetDataStreamGlobalRetentionAction() {/* no instances */}
public static final class Request extends MasterNodeReadRequest<Request> {
- public Request() {
- super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
- }
-
public Request(StreamInput in) throws IOException {
super(in);
}
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java
index cc61c7fe664be..6e930defd4e0b 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java
@@ -43,8 +43,8 @@ public Request(StreamInput in) throws IOException {
super(in);
}
- public Request() {
- super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
+ public Request(TimeValue masterNodeTimeout) {
+ super(masterNodeTimeout);
}
@Override
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java
index 65ca34a99da23..cd9156ad8b2c8 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java
@@ -32,9 +32,6 @@
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ObjectParser;
-import org.elasticsearch.xcontent.XContentParser;
import java.io.IOException;
import java.util.List;
@@ -53,34 +50,9 @@ private PutDataStreamGlobalRetentionAction() {/* no instances */}
public static final class Request extends MasterNodeRequest<Request> {
- public static final ConstructingObjectParser<PutDataStreamGlobalRetentionAction.Request, Void> PARSER =
- new ConstructingObjectParser<>(
- "put_data_stream_global_retention_request",
- args -> new PutDataStreamGlobalRetentionAction.Request((TimeValue) args[0], (TimeValue) args[1])
- );
-
- static {
- PARSER.declareField(
- ConstructingObjectParser.optionalConstructorArg(),
- (p, c) -> TimeValue.parseTimeValue(p.textOrNull(), DataStreamGlobalRetention.DEFAULT_RETENTION_FIELD.getPreferredName()),
- DataStreamGlobalRetention.DEFAULT_RETENTION_FIELD,
- ObjectParser.ValueType.STRING_OR_NULL
- );
- PARSER.declareField(
- ConstructingObjectParser.optionalConstructorArg(),
- (p, c) -> TimeValue.parseTimeValue(p.textOrNull(), DataStreamGlobalRetention.MAX_RETENTION_FIELD.getPreferredName()),
- DataStreamGlobalRetention.MAX_RETENTION_FIELD,
- ObjectParser.ValueType.STRING_OR_NULL
- );
- }
-
private final DataStreamGlobalRetention globalRetention;
private boolean dryRun = false;
- public static PutDataStreamGlobalRetentionAction.Request parseRequest(XContentParser parser) {
- return PARSER.apply(parser, null);
- }
-
public Request(StreamInput in) throws IOException {
super(in);
globalRetention = DataStreamGlobalRetention.read(in);
@@ -107,8 +79,8 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(dryRun);
}
- public Request(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) {
- super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
+ public Request(TimeValue masterNodeTimeout, @Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) {
+ super(masterNodeTimeout);
this.globalRetention = new DataStreamGlobalRetention(defaultRetention, maxRetention);
}
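
The same migration applies here: the master node timeout becomes the leading constructor argument. A hedged sketch; the retention values are illustrative:

-------------------------------------------------
// Explicit master node timeout followed by the two retention settings.
var request = new PutDataStreamGlobalRetentionAction.Request(
    TimeValue.THIRTY_SECONDS,    // master node timeout
    TimeValue.timeValueDays(7),  // default retention (illustrative)
    TimeValue.timeValueDays(90)  // max retention (illustrative)
);
-------------------------------------------------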
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java
index a10a955b33975..a3959ae818218 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java
@@ -36,8 +36,7 @@ public List<Route> routes() {
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
- GetDataStreamLifecycleStatsAction.Request request = new GetDataStreamLifecycleStatsAction.Request();
- request.masterNodeTimeout(getMasterNodeTimeout(restRequest));
+ final var request = new GetDataStreamLifecycleStatsAction.Request(getMasterNodeTimeout(restRequest));
return channel -> client.execute(
GetDataStreamLifecycleStatsAction.INSTANCE,
request,
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamLifecycleAction.java
index b624892ac6bba..a8a64eaf5cfa3 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamLifecycleAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamLifecycleAction.java
@@ -8,6 +8,7 @@
package org.elasticsearch.datastreams.lifecycle.rest;
import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamLifecycleAction;
@@ -20,6 +21,7 @@
import java.util.List;
import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout;
@ServerlessScope(Scope.INTERNAL)
public class RestDeleteDataStreamLifecycleAction extends BaseRestHandler {
@@ -36,7 +38,9 @@ public List<Route> routes() {
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
- DeleteDataStreamLifecycleAction.Request deleteDataLifecycleRequest = new DeleteDataStreamLifecycleAction.Request(
+ final var deleteDataLifecycleRequest = new DeleteDataStreamLifecycleAction.Request(
+ getMasterNodeTimeout(request),
+ request.paramAsTime("timeout", AcknowledgedRequest.DEFAULT_ACK_TIMEOUT),
Strings.splitStringByCommaToArray(request.param("name"))
);
deleteDataLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteDataLifecycleRequest.indicesOptions()));
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java
index 889b4c490d23f..fe7f03529a421 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java
@@ -75,6 +75,8 @@ enum Database {
Property.RESIDENTIAL_PROXY
)
),
+ ConnectionType(Set.of(Property.IP, Property.CONNECTION_TYPE), Set.of(Property.CONNECTION_TYPE)),
+ Domain(Set.of(Property.IP, Property.DOMAIN), Set.of(Property.DOMAIN)),
Enterprise(
Set.of(
Property.IP,
@@ -94,7 +96,14 @@ enum Database {
Property.ANONYMOUS_VPN,
Property.ANONYMOUS,
Property.PUBLIC_PROXY,
- Property.RESIDENTIAL_PROXY
+ Property.RESIDENTIAL_PROXY,
+ Property.DOMAIN,
+ Property.ISP,
+ Property.ISP_ORGANIZATION_NAME,
+ Property.MOBILE_COUNTRY_CODE,
+ Property.MOBILE_NETWORK_CODE,
+ Property.USER_TYPE,
+ Property.CONNECTION_TYPE
),
Set.of(
Property.COUNTRY_ISO_CODE,
@@ -105,13 +114,38 @@ enum Database {
Property.CITY_NAME,
Property.LOCATION
)
+ ),
+ Isp(
+ Set.of(
+ Property.IP,
+ Property.ASN,
+ Property.ORGANIZATION_NAME,
+ Property.NETWORK,
+ Property.ISP,
+ Property.ISP_ORGANIZATION_NAME,
+ Property.MOBILE_COUNTRY_CODE,
+ Property.MOBILE_NETWORK_CODE
+ ),
+ Set.of(
+ Property.IP,
+ Property.ASN,
+ Property.ORGANIZATION_NAME,
+ Property.NETWORK,
+ Property.ISP,
+ Property.ISP_ORGANIZATION_NAME,
+ Property.MOBILE_COUNTRY_CODE,
+ Property.MOBILE_NETWORK_CODE
+ )
);
private static final String CITY_DB_SUFFIX = "-City";
private static final String COUNTRY_DB_SUFFIX = "-Country";
private static final String ASN_DB_SUFFIX = "-ASN";
private static final String ANONYMOUS_IP_DB_SUFFIX = "-Anonymous-IP";
+ private static final String CONNECTION_TYPE_DB_SUFFIX = "-Connection-Type";
+ private static final String DOMAIN_DB_SUFFIX = "-Domain";
private static final String ENTERPRISE_DB_SUFFIX = "-Enterprise";
+ private static final String ISP_DB_SUFFIX = "-ISP";
/**
* Parses the passed-in databaseType (presumably from the passed-in databaseFile) and return the Database instance that is
@@ -133,8 +167,14 @@ public static Database getDatabase(final String databaseType, final String datab
database = Database.Asn;
} else if (databaseType.endsWith(Database.ANONYMOUS_IP_DB_SUFFIX)) {
database = Database.AnonymousIp;
+ } else if (databaseType.endsWith(Database.CONNECTION_TYPE_DB_SUFFIX)) {
+ database = Database.ConnectionType;
+ } else if (databaseType.endsWith(Database.DOMAIN_DB_SUFFIX)) {
+ database = Database.Domain;
} else if (databaseType.endsWith(Database.ENTERPRISE_DB_SUFFIX)) {
database = Database.Enterprise;
+ } else if (databaseType.endsWith(Database.ISP_DB_SUFFIX)) {
+ database = Database.Isp;
}
}
@@ -209,7 +249,14 @@ enum Property {
ANONYMOUS_VPN,
ANONYMOUS,
PUBLIC_PROXY,
- RESIDENTIAL_PROXY;
+ RESIDENTIAL_PROXY,
+ DOMAIN,
+ ISP,
+ ISP_ORGANIZATION_NAME,
+ MOBILE_COUNTRY_CODE,
+ MOBILE_NETWORK_CODE,
+ CONNECTION_TYPE,
+ USER_TYPE;
/**
* Parses a string representation of a property into an actual Property instance. Not all properties that exist are
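
The new database types are resolved from the databaseType string by suffix, extending the existing City/Country/ASN handling. A sketch of the expected resolution; the type strings follow MaxMind's database naming and are illustrative:

-------------------------------------------------
// Suffix-based resolution added above.
Database isp = Database.getDatabase("GeoIP2-ISP", "GeoIP2-ISP.mmdb");          // Database.Isp
Database domain = Database.getDatabase("GeoIP2-Domain", "GeoIP2-Domain.mmdb"); // Database.Domain
Database connectionType = Database.getDatabase("GeoIP2-Connection-Type", "GeoIP2-Connection-Type.mmdb"); // Database.ConnectionType
-------------------------------------------------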
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java
index 12f6a299e1232..72873efd0d73f 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java
@@ -15,8 +15,11 @@
import com.maxmind.geoip2.model.AnonymousIpResponse;
import com.maxmind.geoip2.model.AsnResponse;
import com.maxmind.geoip2.model.CityResponse;
+import com.maxmind.geoip2.model.ConnectionTypeResponse;
import com.maxmind.geoip2.model.CountryResponse;
+import com.maxmind.geoip2.model.DomainResponse;
import com.maxmind.geoip2.model.EnterpriseResponse;
+import com.maxmind.geoip2.model.IspResponse;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -177,12 +180,30 @@ public AnonymousIpResponse getAnonymousIp(InetAddress ipAddress) {
return getResponse(ipAddress, DatabaseReader::tryAnonymousIp);
}
+ @Nullable
+ @Override
+ public ConnectionTypeResponse getConnectionType(InetAddress ipAddress) {
+ return getResponse(ipAddress, DatabaseReader::tryConnectionType);
+ }
+
+ @Nullable
+ @Override
+ public DomainResponse getDomain(InetAddress ipAddress) {
+ return getResponse(ipAddress, DatabaseReader::tryDomain);
+ }
+
@Nullable
@Override
public EnterpriseResponse getEnterprise(InetAddress ipAddress) {
return getResponse(ipAddress, DatabaseReader::tryEnterprise);
}
+ @Nullable
+ @Override
+ public IspResponse getIsp(InetAddress ipAddress) {
+ return getResponse(ipAddress, DatabaseReader::tryIsp);
+ }
+
boolean preLookup() {
return currentUsages.updateAndGet(current -> current < 0 ? current : current + 1) > 0;
}
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabase.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabase.java
index 088fa2b0d1fa8..674c500f069bc 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabase.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabase.java
@@ -11,8 +11,11 @@
import com.maxmind.geoip2.model.AnonymousIpResponse;
import com.maxmind.geoip2.model.AsnResponse;
import com.maxmind.geoip2.model.CityResponse;
+import com.maxmind.geoip2.model.ConnectionTypeResponse;
import com.maxmind.geoip2.model.CountryResponse;
+import com.maxmind.geoip2.model.DomainResponse;
import com.maxmind.geoip2.model.EnterpriseResponse;
+import com.maxmind.geoip2.model.IspResponse;
import org.elasticsearch.core.Nullable;
@@ -58,9 +61,18 @@ public interface GeoIpDatabase {
@Nullable
AnonymousIpResponse getAnonymousIp(InetAddress ipAddress);
+ @Nullable
+ ConnectionTypeResponse getConnectionType(InetAddress ipAddress);
+
+ @Nullable
+ DomainResponse getDomain(InetAddress ipAddress);
+
@Nullable
EnterpriseResponse getEnterprise(InetAddress ipAddress);
+ @Nullable
+ IspResponse getIsp(InetAddress ipAddress);
+
/**
* Releases the current database object. Called after processing a single document. Databases should be closed or returned to a
* resource pool. No further interactions should be expected.
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java
index 6898e44335793..8e7f5d575378d 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java
@@ -12,8 +12,12 @@
import com.maxmind.geoip2.model.AnonymousIpResponse;
import com.maxmind.geoip2.model.AsnResponse;
import com.maxmind.geoip2.model.CityResponse;
+import com.maxmind.geoip2.model.ConnectionTypeResponse;
+import com.maxmind.geoip2.model.ConnectionTypeResponse.ConnectionType;
import com.maxmind.geoip2.model.CountryResponse;
+import com.maxmind.geoip2.model.DomainResponse;
import com.maxmind.geoip2.model.EnterpriseResponse;
+import com.maxmind.geoip2.model.IspResponse;
import com.maxmind.geoip2.record.City;
import com.maxmind.geoip2.record.Continent;
import com.maxmind.geoip2.record.Country;
@@ -175,7 +179,10 @@ private Map<String, Object> getGeoData(GeoIpDatabase geoIpDatabase, String ip) t
case Country -> retrieveCountryGeoData(geoIpDatabase, ipAddress);
case Asn -> retrieveAsnGeoData(geoIpDatabase, ipAddress);
case AnonymousIp -> retrieveAnonymousIpGeoData(geoIpDatabase, ipAddress);
+ case ConnectionType -> retrieveConnectionTypeGeoData(geoIpDatabase, ipAddress);
+ case Domain -> retrieveDomainGeoData(geoIpDatabase, ipAddress);
case Enterprise -> retrieveEnterpriseGeoData(geoIpDatabase, ipAddress);
+ case Isp -> retrieveIspGeoData(geoIpDatabase, ipAddress);
};
}
@@ -317,7 +324,7 @@ private Map<String, Object> retrieveAsnGeoData(GeoIpDatabase geoIpDatabase, Inet
return Map.of();
}
Long asn = response.getAutonomousSystemNumber();
- String organization_name = response.getAutonomousSystemOrganization();
+ String organizationName = response.getAutonomousSystemOrganization();
Network network = response.getNetwork();
Map<String, Object> geoData = new HashMap<>();
@@ -330,8 +337,8 @@ private Map retrieveAsnGeoData(GeoIpDatabase geoIpDatabase, Inet
}
}
case ORGANIZATION_NAME -> {
- if (organization_name != null) {
- geoData.put("organization_name", organization_name);
+ if (organizationName != null) {
+ geoData.put("organization_name", organizationName);
}
}
case NETWORK -> {
@@ -384,6 +391,50 @@ private Map<String, Object> retrieveAnonymousIpGeoData(GeoIpDatabase geoIpDataba
return geoData;
}
+ private Map<String, Object> retrieveConnectionTypeGeoData(GeoIpDatabase geoIpDatabase, InetAddress ipAddress) {
+ ConnectionTypeResponse response = geoIpDatabase.getConnectionType(ipAddress);
+ if (response == null) {
+ return Map.of();
+ }
+
+ ConnectionType connectionType = response.getConnectionType();
+
+ Map<String, Object> geoData = new HashMap<>();
+ for (Property property : this.properties) {
+ switch (property) {
+ case IP -> geoData.put("ip", NetworkAddress.format(ipAddress));
+ case CONNECTION_TYPE -> {
+ if (connectionType != null) {
+ geoData.put("connection_type", connectionType.toString());
+ }
+ }
+ }
+ }
+ return geoData;
+ }
+
+ private Map<String, Object> retrieveDomainGeoData(GeoIpDatabase geoIpDatabase, InetAddress ipAddress) {
+ DomainResponse response = geoIpDatabase.getDomain(ipAddress);
+ if (response == null) {
+ return Map.of();
+ }
+
+ String domain = response.getDomain();
+
+ Map<String, Object> geoData = new HashMap<>();
+ for (Property property : this.properties) {
+ switch (property) {
+ case IP -> geoData.put("ip", NetworkAddress.format(ipAddress));
+ case DOMAIN -> {
+ if (domain != null) {
+ geoData.put("domain", domain);
+ }
+ }
+ }
+ }
+ return geoData;
+ }
+
private Map<String, Object> retrieveEnterpriseGeoData(GeoIpDatabase geoIpDatabase, InetAddress ipAddress) {
EnterpriseResponse response = geoIpDatabase.getEnterprise(ipAddress);
if (response == null) {
@@ -397,9 +448,14 @@ private Map<String, Object> retrieveEnterpriseGeoData(GeoIpDatabas
Subdivision subdivision = response.getMostSpecificSubdivision();
Long asn = response.getTraits().getAutonomousSystemNumber();
- String organization_name = response.getTraits().getAutonomousSystemOrganization();
+ String organizationName = response.getTraits().getAutonomousSystemOrganization();
Network network = response.getTraits().getNetwork();
+ String isp = response.getTraits().getIsp();
+ String ispOrganization = response.getTraits().getOrganization();
+ String mobileCountryCode = response.getTraits().getMobileCountryCode();
+ String mobileNetworkCode = response.getTraits().getMobileNetworkCode();
+
boolean isHostingProvider = response.getTraits().isHostingProvider();
boolean isTorExitNode = response.getTraits().isTorExitNode();
boolean isAnonymousVpn = response.getTraits().isAnonymousVpn();
@@ -407,6 +463,12 @@ private Map retrieveEnterpriseGeoData(GeoIpDatabase geoIpDatabas
boolean isPublicProxy = response.getTraits().isPublicProxy();
boolean isResidentialProxy = response.getTraits().isResidentialProxy();
+ String userType = response.getTraits().getUserType();
+
+ String domain = response.getTraits().getDomain();
+
+ ConnectionType connectionType = response.getTraits().getConnectionType();
+
Map<String, Object> geoData = new HashMap<>();
for (Property property : this.properties) {
switch (property) {
@@ -473,8 +535,8 @@ private Map<String, Object> retrieveEnterpriseGeoData(GeoIpDatabas
}
}
case ORGANIZATION_NAME -> {
- if (organization_name != null) {
- geoData.put("organization_name", organization_name);
+ if (organizationName != null) {
+ geoData.put("organization_name", organizationName);
}
}
case NETWORK -> {
@@ -500,6 +562,99 @@ private Map<String, Object> retrieveEnterpriseGeoData(GeoIpDatabas
case RESIDENTIAL_PROXY -> {
geoData.put("residential_proxy", isResidentialProxy);
}
+ case DOMAIN -> {
+ if (domain != null) {
+ geoData.put("domain", domain);
+ }
+ }
+ case ISP -> {
+ if (isp != null) {
+ geoData.put("isp", isp);
+ }
+ }
+ case ISP_ORGANIZATION_NAME -> {
+ if (ispOrganization != null) {
+ geoData.put("isp_organization", ispOrganization);
+ }
+ }
+ case MOBILE_COUNTRY_CODE -> {
+ if (mobileCountryCode != null) {
+ geoData.put("mobile_country_code", mobileCountryCode);
+ }
+ }
+ case MOBILE_NETWORK_CODE -> {
+ if (mobileNetworkCode != null) {
+ geoData.put("mobile_network_code", mobileNetworkCode);
+ }
+ }
+ case USER_TYPE -> {
+ if (userType != null) {
+ geoData.put("user_type", userType);
+ }
+ }
+ case CONNECTION_TYPE -> {
+ if (connectionType != null) {
+ geoData.put("connection_type", connectionType.toString());
+ }
+ }
+ }
+ }
+ return geoData;
+ }
+
+ private Map<String, Object> retrieveIspGeoData(GeoIpDatabase geoIpDatabase, InetAddress ipAddress) {
+ IspResponse response = geoIpDatabase.getIsp(ipAddress);
+ if (response == null) {
+ return Map.of();
+ }
+
+ String isp = response.getIsp();
+ String ispOrganization = response.getOrganization();
+ String mobileNetworkCode = response.getMobileNetworkCode();
+ String mobileCountryCode = response.getMobileCountryCode();
+ Long asn = response.getAutonomousSystemNumber();
+ String organizationName = response.getAutonomousSystemOrganization();
+ Network network = response.getNetwork();
+
+ Map<String, Object> geoData = new HashMap<>();
+ for (Property property : this.properties) {
+ switch (property) {
+ case IP -> geoData.put("ip", NetworkAddress.format(ipAddress));
+ case ASN -> {
+ if (asn != null) {
+ geoData.put("asn", asn);
+ }
+ }
+ case ORGANIZATION_NAME -> {
+ if (organizationName != null) {
+ geoData.put("organization_name", organizationName);
+ }
+ }
+ case NETWORK -> {
+ if (network != null) {
+ geoData.put("network", network.toString());
+ }
+ }
+ case ISP -> {
+ if (isp != null) {
+ geoData.put("isp", isp);
+ }
+ }
+ case ISP_ORGANIZATION_NAME -> {
+ if (ispOrganization != null) {
+ geoData.put("isp_organization", ispOrganization);
+ }
+ }
+ case MOBILE_COUNTRY_CODE -> {
+ if (mobileCountryCode != null) {
+ geoData.put("mobile_country_code", mobileCountryCode);
+ }
+ }
+ case MOBILE_NETWORK_CODE -> {
+ if (mobileNetworkCode != null) {
+ geoData.put("mobile_network_code", mobileNetworkCode);
+ }
+ }
}
}
return geoData;
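
All four lookups added in this file share one shape: query the database, return an empty map on a miss, then copy only the requested, non-null properties into the result. A condensed sketch of that shared pattern, using the Domain lookup as the example; this is a restatement of the code above, not a new API:

-------------------------------------------------
// Condensed from retrieveDomainGeoData above.
DomainResponse response = geoIpDatabase.getDomain(ipAddress);
if (response == null) {
    return Map.of(); // no record for this address
}
Map<String, Object> geoData = new HashMap<>();
for (Property property : this.properties) {
    switch (property) {
        case IP -> geoData.put("ip", NetworkAddress.format(ipAddress));
        case DOMAIN -> {
            if (response.getDomain() != null) {
                geoData.put("domain", response.getDomain());
            }
        }
        default -> { /* remaining properties follow the same null-checked pattern */ }
    }
}
return geoData;
-------------------------------------------------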
diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java
index ec77cacbdb6b6..6eb4e9b1acb51 100644
--- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java
+++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java
@@ -11,6 +11,7 @@
import com.maxmind.geoip2.DatabaseReader;
import org.elasticsearch.common.CheckedSupplier;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.core.PathUtils;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.ingest.RandomDocumentPicks;
@@ -29,6 +30,7 @@
import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.is;
@@ -38,6 +40,24 @@ public class GeoIpProcessorTests extends ESTestCase {
private static final Set<Property> ALL_PROPERTIES = Set.of(Property.values());
+ public void testDatabasePropertyInvariants() {
+ // the city database is like a specialization of the country database
+ assertThat(Sets.difference(Database.Country.properties(), Database.City.properties()), is(empty()));
+ assertThat(Sets.difference(Database.Country.defaultProperties(), Database.City.defaultProperties()), is(empty()));
+
+ // the isp database is like a specialization of the asn database
+ assertThat(Sets.difference(Database.Asn.properties(), Database.Isp.properties()), is(empty()));
+ assertThat(Sets.difference(Database.Asn.defaultProperties(), Database.Isp.defaultProperties()), is(empty()));
+
+ // the enterprise database is like everything joined together
+ for (Database type : Database.values()) {
+ assertThat(Sets.difference(type.properties(), Database.Enterprise.properties()), is(empty()));
+ }
+ // but in terms of the default fields, it's like a drop-in replacement for the city database
+ // n.b. this is just a choice we decided to make here at Elastic
+ assertThat(Database.Enterprise.defaultProperties(), equalTo(Database.City.defaultProperties()));
+ }
+
public void testCity() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
randomAlphaOfLength(10),
@@ -336,8 +356,64 @@ public void testAnonymmousIp() throws Exception {
assertThat(geoData.get("residential_proxy"), equalTo(true));
}
+ public void testConnectionType() throws Exception {
+ String ip = "214.78.120.5";
+ GeoIpProcessor processor = new GeoIpProcessor(
+ randomAlphaOfLength(10),
+ null,
+ "source_field",
+ loader("/GeoIP2-Connection-Type-Test.mmdb"),
+ () -> true,
+ "target_field",
+ ALL_PROPERTIES,
+ false,
+ false,
+ "filename"
+ );
+
+ Map<String, Object> document = new HashMap<>();
+ document.put("source_field", ip);
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+ processor.execute(ingestDocument);
+
+ assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(ip));
+ @SuppressWarnings("unchecked")
+ Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
+ assertThat(geoData.size(), equalTo(2));
+ assertThat(geoData.get("ip"), equalTo(ip));
+ assertThat(geoData.get("connection_type"), equalTo("Satellite"));
+ }
+
+ public void testDomain() throws Exception {
+ String ip = "69.219.64.2";
+ GeoIpProcessor processor = new GeoIpProcessor(
+ randomAlphaOfLength(10),
+ null,
+ "source_field",
+ loader("/GeoIP2-Domain-Test.mmdb"),
+ () -> true,
+ "target_field",
+ ALL_PROPERTIES,
+ false,
+ false,
+ "filename"
+ );
+
+ Map<String, Object> document = new HashMap<>();
+ document.put("source_field", ip);
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+ processor.execute(ingestDocument);
+
+ assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(ip));
+ @SuppressWarnings("unchecked")
+ Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
+ assertThat(geoData.size(), equalTo(2));
+ assertThat(geoData.get("ip"), equalTo(ip));
+ assertThat(geoData.get("domain"), equalTo("ameritech.net"));
+ }
+
public void testEnterprise() throws Exception {
- String ip = "2.125.160.216";
+ String ip = "74.209.24.4";
GeoIpProcessor processor = new GeoIpProcessor(
randomAlphaOfLength(10),
null,
@@ -359,26 +435,67 @@ public void testEnterprise() throws Exception {
assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(ip));
@SuppressWarnings("unchecked")
Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
- assertThat(geoData.size(), equalTo(16));
+ assertThat(geoData.size(), equalTo(23));
assertThat(geoData.get("ip"), equalTo(ip));
- assertThat(geoData.get("country_iso_code"), equalTo("GB"));
- assertThat(geoData.get("country_name"), equalTo("United Kingdom"));
- assertThat(geoData.get("continent_name"), equalTo("Europe"));
- assertThat(geoData.get("region_iso_code"), equalTo("GB-WBK"));
- assertThat(geoData.get("region_name"), equalTo("West Berkshire"));
- assertThat(geoData.get("city_name"), equalTo("Boxford"));
- assertThat(geoData.get("timezone"), equalTo("Europe/London"));
+ assertThat(geoData.get("country_iso_code"), equalTo("US"));
+ assertThat(geoData.get("country_name"), equalTo("United States"));
+ assertThat(geoData.get("continent_name"), equalTo("North America"));
+ assertThat(geoData.get("region_iso_code"), equalTo("US-NY"));
+ assertThat(geoData.get("region_name"), equalTo("New York"));
+ assertThat(geoData.get("city_name"), equalTo("Chatham"));
+ assertThat(geoData.get("timezone"), equalTo("America/New_York"));
Map<String, Object> location = new HashMap<>();
- location.put("lat", 51.75);
- location.put("lon", -1.25);
+ location.put("lat", 42.3478);
+ location.put("lon", -73.5549);
assertThat(geoData.get("location"), equalTo(location));
- assertThat(geoData.get("network"), equalTo("2.125.160.216/29"));
+ assertThat(geoData.get("asn"), equalTo(14671L));
+ assertThat(geoData.get("organization_name"), equalTo("FairPoint Communications"));
+ assertThat(geoData.get("network"), equalTo("74.209.16.0/20"));
assertThat(geoData.get("hosting_provider"), equalTo(false));
assertThat(geoData.get("tor_exit_node"), equalTo(false));
assertThat(geoData.get("anonymous_vpn"), equalTo(false));
assertThat(geoData.get("anonymous"), equalTo(false));
assertThat(geoData.get("public_proxy"), equalTo(false));
assertThat(geoData.get("residential_proxy"), equalTo(false));
+ assertThat(geoData.get("domain"), equalTo("frpt.net"));
+ assertThat(geoData.get("isp"), equalTo("Fairpoint Communications"));
+ assertThat(geoData.get("isp_organization"), equalTo("Fairpoint Communications"));
+ assertThat(geoData.get("user_type"), equalTo("residential"));
+ assertThat(geoData.get("connection_type"), equalTo("Cable/DSL"));
+ }
+
+ public void testIsp() throws Exception {
+ String ip = "149.101.100.1";
+ GeoIpProcessor processor = new GeoIpProcessor(
+ randomAlphaOfLength(10),
+ null,
+ "source_field",
+ loader("/GeoIP2-ISP-Test.mmdb"),
+ () -> true,
+ "target_field",
+ ALL_PROPERTIES,
+ false,
+ false,
+ "filename"
+ );
+
+ Map<String, Object> document = new HashMap<>();
+ document.put("source_field", ip);
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+ processor.execute(ingestDocument);
+
+ assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(ip));
+ @SuppressWarnings("unchecked")
+ Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
+ assertThat(geoData.size(), equalTo(8));
+ assertThat(geoData.get("ip"), equalTo(ip));
+ assertThat(geoData.get("asn"), equalTo(6167L));
+ assertThat(geoData.get("organization_name"), equalTo("CELLCO-PART"));
+ assertThat(geoData.get("network"), equalTo("149.101.100.0/28"));
+ assertThat(geoData.get("isp"), equalTo("Verizon Wireless"));
+ assertThat(geoData.get("isp_organization"), equalTo("Verizon Wireless"));
+ assertThat(geoData.get("mobile_network_code"), equalTo("004"));
+ assertThat(geoData.get("mobile_country_code"), equalTo("310"));
}
public void testAddressIsNotInTheDatabase() throws Exception {
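
A note on the helper used by testDatabasePropertyInvariants above: Sets.difference(a, b) returns the elements of a that are not in b, so asserting an empty difference asserts that a is a subset of b. A minimal sketch of the semantics:

-------------------------------------------------
Set<String> a = Set.of("x", "y");
Set<String> b = Set.of("x", "y", "z");
assert Sets.difference(a, b).isEmpty();           // a is a subset of b
assert Sets.difference(b, a).equals(Set.of("z")); // elements of b missing from a
-------------------------------------------------
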
diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java
index 4e6e1d11c0fdd..a465ae7cd799d 100644
--- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java
+++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java
@@ -153,6 +153,9 @@ public class MaxMindSupportTests extends ESTestCase {
"traits.userType"
);
+ private static final Set<String> CONNECT_TYPE_SUPPORTED_FIELDS = Set.of("connectionType");
+ private static final Set<String> CONNECT_TYPE_UNSUPPORTED_FIELDS = Set.of("ipAddress", "network");
+
private static final Set<String> COUNTRY_SUPPORTED_FIELDS = Set.of("continent.name", "country.isoCode", "country.name");
private static final Set<String> COUNTRY_UNSUPPORTED_FIELDS = Set.of(
"continent.code",
@@ -201,6 +204,9 @@ public class MaxMindSupportTests extends ESTestCase {
"traits.userType"
);
+ private static final Set<String> DOMAIN_SUPPORTED_FIELDS = Set.of("domain");
+ private static final Set<String> DOMAIN_UNSUPPORTED_FIELDS = Set.of("ipAddress", "network");
+
private static final Set<String> ENTERPRISE_SUPPORTED_FIELDS = Set.of(
"city.name",
"continent.name",
@@ -215,11 +221,18 @@ public class MaxMindSupportTests extends ESTestCase {
"traits.anonymousVpn",
"traits.autonomousSystemNumber",
"traits.autonomousSystemOrganization",
+ "traits.connectionType",
+ "traits.domain",
"traits.hostingProvider",
+ "traits.isp",
+ "traits.mobileCountryCode",
+ "traits.mobileNetworkCode",
"traits.network",
+ "traits.organization",
"traits.publicProxy",
"traits.residentialProxy",
- "traits.torExitNode"
+ "traits.torExitNode",
+ "traits.userType"
);
private static final Set<String> ENTERPRISE_UNSUPPORTED_FIELDS = Set.of(
"city.confidence",
@@ -267,20 +280,25 @@ public class MaxMindSupportTests extends ESTestCase {
"subdivisions.names",
"traits.anonymousProxy",
"traits.anycast",
- "traits.connectionType",
- "traits.domain",
"traits.ipAddress",
- "traits.isp",
"traits.legitimateProxy",
- "traits.mobileCountryCode",
- "traits.mobileNetworkCode",
- "traits.organization",
"traits.satelliteProvider",
"traits.staticIpScore",
- "traits.userCount",
- "traits.userType"
+ "traits.userCount"
+ );
+
+ private static final Set<String> ISP_SUPPORTED_FIELDS = Set.of(
+ "autonomousSystemNumber",
+ "autonomousSystemOrganization",
+ "network",
+ "isp",
+ "mobileCountryCode",
+ "mobileNetworkCode",
+ "organization"
);
+ private static final Set<String> ISP_UNSUPPORTED_FIELDS = Set.of("ipAddress");
+
private static final Map<Database, Set<String>> TYPE_TO_SUPPORTED_FIELDS_MAP = Map.of(
Database.AnonymousIp,
ANONYMOUS_IP_SUPPORTED_FIELDS,
@@ -288,10 +306,16 @@ public class MaxMindSupportTests extends ESTestCase {
ASN_SUPPORTED_FIELDS,
Database.City,
CITY_SUPPORTED_FIELDS,
+ Database.ConnectionType,
+ CONNECT_TYPE_SUPPORTED_FIELDS,
Database.Country,
COUNTRY_SUPPORTED_FIELDS,
+ Database.Domain,
+ DOMAIN_SUPPORTED_FIELDS,
Database.Enterprise,
- ENTERPRISE_SUPPORTED_FIELDS
+ ENTERPRISE_SUPPORTED_FIELDS,
+ Database.Isp,
+ ISP_SUPPORTED_FIELDS
);
private static final Map<Database, Set<String>> TYPE_TO_UNSUPPORTED_FIELDS_MAP = Map.of(
Database.AnonymousIp,
@@ -300,10 +324,16 @@ public class MaxMindSupportTests extends ESTestCase {
ASN_UNSUPPORTED_FIELDS,
Database.City,
CITY_UNSUPPORTED_FIELDS,
+ Database.ConnectionType,
+ CONNECT_TYPE_UNSUPPORTED_FIELDS,
Database.Country,
COUNTRY_UNSUPPORTED_FIELDS,
+ Database.Domain,
+ DOMAIN_UNSUPPORTED_FIELDS,
Database.Enterprise,
- ENTERPRISE_UNSUPPORTED_FIELDS
+ ENTERPRISE_UNSUPPORTED_FIELDS,
+ Database.Isp,
+ ISP_UNSUPPORTED_FIELDS
);
private static final Map<Database, Class<? extends AbstractResponse>> TYPE_TO_MAX_MIND_CLASS = Map.of(
Database.AnonymousIp,
@@ -312,18 +342,19 @@ public class MaxMindSupportTests extends ESTestCase {
AsnResponse.class,
Database.City,
CityResponse.class,
+ Database.ConnectionType,
+ ConnectionTypeResponse.class,
Database.Country,
CountryResponse.class,
+ Database.Domain,
+ DomainResponse.class,
Database.Enterprise,
- EnterpriseResponse.class
+ EnterpriseResponse.class,
+ Database.Isp,
+ IspResponse.class
);
- private static final Set<Class<? extends AbstractResponse>> KNOWN_UNSUPPORTED_RESPONSE_CLASSES = Set.of(
- ConnectionTypeResponse.class,
- DomainResponse.class,
- IspResponse.class,
- IpRiskResponse.class
- );
+ private static final Set<Class<? extends AbstractResponse>> KNOWN_UNSUPPORTED_RESPONSE_CLASSES = Set.of(IpRiskResponse.class);
public void testMaxMindSupport() {
for (Database databaseType : Database.values()) {
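
The rough invariant testMaxMindSupport enforces over the maps above, with the reflective walk over the MaxMind response getters elided (a sketch of the idea, not the test's actual body): every field of each registered response class must land in exactly one of the supported or unsupported sets, which is why adding the Domain, ConnectionType and Isp databases meant populating both maps and removing their response classes from KNOWN_UNSUPPORTED_RESPONSE_CLASSES.

-------------------------------------------------
for (Database type : TYPE_TO_MAX_MIND_CLASS.keySet()) {
    Set<String> supported = TYPE_TO_SUPPORTED_FIELDS_MAP.get(type);
    Set<String> unsupported = TYPE_TO_UNSUPPORTED_FIELDS_MAP.get(type);
    // a field may not be both supported and unsupported for the same database
    assert Sets.intersection(supported, unsupported).isEmpty();
}
-------------------------------------------------
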
diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-Connection-Type-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-Connection-Type-Test.mmdb
new file mode 100644
index 0000000000000..7bfae78964df0
Binary files /dev/null and b/modules/ingest-geoip/src/test/resources/GeoIP2-Connection-Type-Test.mmdb differ
diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-Domain-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-Domain-Test.mmdb
new file mode 100644
index 0000000000000..d21c2a93df7d4
Binary files /dev/null and b/modules/ingest-geoip/src/test/resources/GeoIP2-Domain-Test.mmdb differ
diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-ISP-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-ISP-Test.mmdb
new file mode 100644
index 0000000000000..d16b0eee4c5e5
Binary files /dev/null and b/modules/ingest-geoip/src/test/resources/GeoIP2-ISP-Test.mmdb differ
diff --git a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
index 275666eec5c42..b48b2941e6097 100644
--- a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
+++ b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
@@ -8,6 +8,8 @@
package org.elasticsearch.kibana;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchRequest;
@@ -15,12 +17,15 @@
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
import org.elasticsearch.index.IndexingPressure;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.ThreadPool;
+import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
@@ -42,7 +47,12 @@
* threads that wait on a phaser. This lets us verify that operations on system indices
* are being directed to other thread pools.
*/
+@TestLogging(
+ reason = "investigate",
+ value = "org.elasticsearch.kibana.KibanaThreadPoolIT:DEBUG,org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor:TRACE"
+)
public class KibanaThreadPoolIT extends ESIntegTestCase {
+ private static final Logger logger = LogManager.getLogger(KibanaThreadPoolIT.class);
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
@@ -195,10 +205,21 @@ private static void fillThreadPoolQueues(String threadPoolName, ThreadPool threa
try {
threadPool.executor(threadPoolName).execute(() -> {});
} catch (EsRejectedExecutionException e) {
+ logger.debug("Exception when filling the queue " + threadPoolName, e);
+ logThreadPoolQueue(threadPoolName, threadPool);
// we can't be sure that some other task won't get queued in a test cluster
// but the threadpool's thread is already blocked
}
}
+
+ logThreadPoolQueue(threadPoolName, threadPool);
+ }
+
+ private static void logThreadPoolQueue(String threadPoolName, ThreadPool threadPool) {
+ if (threadPool.executor(threadPoolName) instanceof EsThreadPoolExecutor tpe) {
+ logger.debug("Thread pool details " + threadPoolName + " " + tpe);
+ logger.debug(Arrays.toString(tpe.getTasks().toArray()));
+ }
}
}
diff --git a/muted-tests.yml b/muted-tests.yml
index 210215a131339..e1e80a3d3459b 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -1,6 +1,9 @@
tests:
- class: "org.elasticsearch.xpack.transform.transforms.scheduling.MonotonicClockTests"
issue: "https://github.com/elastic/elasticsearch/issues/108529"
+- class: "org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilterTests"
+ issue: "https://github.com/elastic/elasticsearch/issues/108649"
+ method: "testManyRandomDocs"
# Examples:
#
# Mute a single test case in a YAML test suite:
diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java
index 81ac8ab1200f6..f9723f30cc371 100644
--- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java
+++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java
@@ -126,6 +126,13 @@ public void teardownTest() {
rm(tempDir);
}
+ @Override
+ protected void dumpDebug() {
+ final Result containerLogs = getContainerLogs();
+ logger.warn("Elasticsearch log stdout:\n" + containerLogs.stdout());
+ logger.warn("Elasticsearch log stderr:\n" + containerLogs.stderr());
+ }
+
/**
* Checks that the Docker image can be run, and that it passes various checks.
*/
@@ -1220,7 +1227,8 @@ public void test500Readiness() throws Exception {
);
waitForElasticsearch(installation);
dumpDebug();
- assertTrue(readinessProbe(9399));
+ // readiness may still take time as file settings are applied into cluster state (even non-existent file settings)
+ assertBusy(() -> assertTrue(readinessProbe(9399)));
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99508")
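
On the readiness fix above: assertBusy retries the assertion with growing backoff until it passes or the timeout elapses (10 seconds by default in ESTestCase), so a probe that is briefly unready while file settings are applied no longer fails on the first poll. Both forms, as a sketch:

-------------------------------------------------
assertBusy(() -> assertTrue(readinessProbe(9399)));                       // default 10s timeout
assertBusy(() -> assertTrue(readinessProbe(9399)), 30, TimeUnit.SECONDS); // explicit timeout
-------------------------------------------------
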
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/capabilities/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/capabilities/10_basic.yml
index 715e696bd1032..ad70ad7f8fb1e 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/capabilities/10_basic.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/capabilities/10_basic.yml
@@ -2,6 +2,7 @@
"Capabilities API":
- requires:
+ test_runner_features: [capabilities]
capabilities:
- method: GET
path: /_capabilities
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
index 0b8ee5ea82601..919d548d6498d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
@@ -250,17 +250,16 @@ public void testRolloverDryRun() throws Exception {
ensureGreen();
Logger allocationServiceLogger = LogManager.getLogger(AllocationService.class);
- MockLogAppender appender = new MockLogAppender();
- appender.addExpectation(
- new MockLogAppender.UnseenEventExpectation(
- "no related message logged on dry run",
- AllocationService.class.getName(),
- Level.INFO,
- "*test_index*"
- )
- );
final RolloverResponse response;
- try (var ignored = appender.capturing(AllocationService.class)) {
+ try (var appender = MockLogAppender.capture(AllocationService.class)) {
+ appender.addExpectation(
+ new MockLogAppender.UnseenEventExpectation(
+ "no related message logged on dry run",
+ AllocationService.class.getName(),
+ Level.INFO,
+ "*test_index*"
+ )
+ );
response = indicesAdmin().prepareRolloverIndex("test_alias").dryRun(true).get();
appender.assertAllExpectationsMatched();
}
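
This is the first of many files in this patch migrating to the same pattern: MockLogAppender.capture returns an auto-closeable appender, so the expectations move inside the try block and the appender is released even if the test fails before assertAllExpectationsMatched. The general shape, as a sketch (SomeService stands in for the class under test):

-------------------------------------------------
try (var appender = MockLogAppender.capture(SomeService.class)) {
    appender.addExpectation(
        new MockLogAppender.SeenEventExpectation("description", SomeService.class.getName(), Level.INFO, "*expected message*")
    );
    // ... exercise the code that is expected (or not) to log ...
    appender.assertAllExpectationsMatched();
}
-------------------------------------------------
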
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
index e36d7a4e56eab..7f94809e64fa6 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
@@ -40,7 +40,7 @@
import org.elasticsearch.test.disruption.NetworkDisruption.Bridge;
import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions;
import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
-import org.elasticsearch.test.junit.annotations.TestIssueLogging;
+import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.xcontent.XContentType;
@@ -94,17 +94,17 @@ static ConflictMode randomMode() {
}
/**
- * Test that we do not loose document whose indexing request was successful, under a randomly selected disruption scheme
+ * Test that we do not lose documents, indexed via requests that return success, under randomly selected disruption schemes.
* We also collect & report the type of indexing failures that occur.
*
- * This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates
+ * This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates.
*/
- @TestIssueLogging(
+ @TestLogging(
value = "_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE,"
+ "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE,"
+ "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE,"
+ "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE",
- issueUrl = "https://github.com/elastic/elasticsearch/issues/41068"
+ reason = "Past failures have required a lot of additional logging to debug"
)
public void testAckedIndexing() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
index 407b1aae40600..09dd564d864db 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
@@ -102,23 +102,6 @@ public Path nodeConfigPath(int nodeOrdinal) {
}
public void testCannotJoinNodeWithSingleNodeDiscovery() throws Exception {
- MockLogAppender mockAppender = new MockLogAppender();
- mockAppender.addExpectation(
- new MockLogAppender.SeenEventExpectation("test", JoinHelper.class.getCanonicalName(), Level.INFO, "failed to join") {
-
- @Override
- public boolean innerMatch(final LogEvent event) {
- return event.getThrown() != null
- && event.getThrown().getClass() == RemoteTransportException.class
- && event.getThrown().getCause() != null
- && event.getThrown().getCause().getClass() == IllegalStateException.class
- && event.getThrown()
- .getCause()
- .getMessage()
- .contains("cannot join node with [discovery.type] set to [single-node]");
- }
- }
- );
final TransportService service = internalCluster().getInstance(TransportService.class);
final int port = service.boundAddress().publishAddress().getPort();
final NodeConfigurationSource configurationSource = new NodeConfigurationSource() {
@@ -155,7 +138,24 @@ public Path nodeConfigPath(int nodeOrdinal) {
Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class),
Function.identity()
);
- try (var ignored = mockAppender.capturing(JoinHelper.class)) {
+ try (var mockAppender = MockLogAppender.capture(JoinHelper.class)) {
+ mockAppender.addExpectation(
+ new MockLogAppender.SeenEventExpectation("test", JoinHelper.class.getCanonicalName(), Level.INFO, "failed to join") {
+
+ @Override
+ public boolean innerMatch(final LogEvent event) {
+ return event.getThrown() != null
+ && event.getThrown().getClass() == RemoteTransportException.class
+ && event.getThrown().getCause() != null
+ && event.getThrown().getCause().getClass() == IllegalStateException.class
+ && event.getThrown()
+ .getCause()
+ .getMessage()
+ .contains("cannot join node with [discovery.type] set to [single-node]");
+ }
+ }
+ );
+
other.beforeTest(random());
final ClusterState first = internalCluster().getInstance(ClusterService.class).state();
assertThat(first.nodes().getSize(), equalTo(1));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java
index 3f7ed48b714fb..b9850bc95275c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java
@@ -49,6 +49,7 @@
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.NoOpEngine;
import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
import org.elasticsearch.index.seqno.SequenceNumbers;
@@ -633,7 +634,8 @@ public static final IndexShard newIndexShard(
cbs,
IndexModule.DEFAULT_SNAPSHOT_COMMIT_SUPPLIER,
System::nanoTime,
- null
+ null,
+ MapperMetrics.NOOP
);
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java
index 488641c853562..215596c8130be 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java
@@ -68,10 +68,9 @@ public void testShardLockFailure() throws Exception {
}
});
- var mockLogAppender = new MockLogAppender();
try (
var ignored1 = internalCluster().getInstance(NodeEnvironment.class, node).shardLock(shardId, "blocked for test");
- var ignored2 = mockLogAppender.capturing(IndicesClusterStateService.class);
+ var mockLogAppender = MockLogAppender.capture(IndicesClusterStateService.class);
) {
final CountDownLatch countDownLatch = new CountDownLatch(1);
@@ -138,10 +137,9 @@ public void testShardLockTimeout() throws Exception {
final var shardId = new ShardId(resolveIndex(indexName), 0);
- var mockLogAppender = new MockLogAppender();
try (
var ignored1 = internalCluster().getInstance(NodeEnvironment.class, node).shardLock(shardId, "blocked for test");
- var ignored2 = mockLogAppender.capturing(IndicesClusterStateService.class);
+ var mockLogAppender = MockLogAppender.capture(IndicesClusterStateService.class);
) {
mockLogAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java
index 4446338c4ff2a..2a58ef8eab3bc 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java
@@ -31,7 +31,6 @@
import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.Releasable;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
@@ -649,8 +648,7 @@ public void testManyIndicesWithSameMapping() {
reason = "verify the log output on cancelled"
)
public void testCancel() throws Exception {
- MockLogAppender logAppender = new MockLogAppender();
- try (Releasable ignored = logAppender.capturing(TransportFieldCapabilitiesAction.class)) {
+ try (var logAppender = MockLogAppender.capture(TransportFieldCapabilitiesAction.class)) {
logAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"clear resources",
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 52d0fea8806a6..aa47663ad3886 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -1264,11 +1264,10 @@ public void testDeleteSnapshotsOfDifferentIndexSets() throws IllegalAccessExcept
final String repoName = "test-repo";
createRepository(repoName, "fs");
- final MockLogAppender mockAppender = new MockLogAppender();
- mockAppender.addExpectation(
- new MockLogAppender.UnseenEventExpectation("no warnings", BlobStoreRepository.class.getCanonicalName(), Level.WARN, "*")
- );
- try (var ignored = mockAppender.capturing(BlobStoreRepository.class)) {
+ try (var mockAppender = MockLogAppender.capture(BlobStoreRepository.class)) {
+ mockAppender.addExpectation(
+ new MockLogAppender.UnseenEventExpectation("no warnings", BlobStoreRepository.class.getCanonicalName(), Level.WARN, "*")
+ );
final String index1 = "index-1";
final String index2 = "index-2";
createIndexWithContent("index-1");
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java
index aa0b1edaafd6c..c9d77d7e41f16 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java
@@ -162,8 +162,7 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception {
value = "org.elasticsearch.snapshots.RestoreService:INFO"
)
public void testRestoreLogging() throws IllegalAccessException {
- final MockLogAppender mockLogAppender = new MockLogAppender();
- try (var ignored = mockLogAppender.capturing(RestoreService.class)) {
+ try (var mockLogAppender = MockLogAppender.capture(RestoreService.class)) {
String indexName = "testindex";
String repoName = "test-restore-snapshot-repo";
String snapshotName = "test-restore-snapshot";
@@ -899,8 +898,7 @@ public void testNoWarningsOnRestoreOverClosedIndex() throws IllegalAccessExcepti
index(indexName, "some_id", Map.of("foo", "bar"));
assertAcked(indicesAdmin().prepareClose(indexName).get());
- final MockLogAppender mockAppender = new MockLogAppender();
- try (var ignored = mockAppender.capturing(FileRestoreContext.class)) {
+ try (var mockAppender = MockLogAppender.capture(FileRestoreContext.class)) {
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation("no warnings", FileRestoreContext.class.getCanonicalName(), Level.WARN, "*")
);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java
index 9aec3504a65f5..40dc9cbf6ff9f 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java
@@ -136,9 +136,7 @@ public void testWarningSpeedOverRecovery() throws Exception {
}
final String primaryNode = internalCluster().startNode(primaryNodeSettings);
- final MockLogAppender mockLogAppender = new MockLogAppender();
- try (var ignored = mockLogAppender.capturing(BlobStoreRepository.class)) {
-
+ try (var mockLogAppender = MockLogAppender.capture(BlobStoreRepository.class)) {
MockLogAppender.EventuallySeenEventExpectation snapshotExpectation = new MockLogAppender.EventuallySeenEventExpectation(
"snapshot speed over recovery speed",
"org.elasticsearch.repositories.blobstore.BlobStoreRepository",
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java
index 19e66c6653577..307219bcc667e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java
@@ -31,9 +31,7 @@ public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws E
createIndexWithRandomDocs("test-index", randomIntBetween(1, 42));
createSnapshot("test-repo", "test-snapshot", List.of("test-index"));
- final MockLogAppender mockLogAppender = new MockLogAppender();
-
- try (var ignored = mockLogAppender.capturing(SnapshotsService.class)) {
+ try (var mockLogAppender = MockLogAppender.capture(SnapshotsService.class)) {
mockLogAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"[does-not-exist]",
@@ -80,10 +78,7 @@ public void testSnapshotDeletionFailureShouldBeLogged() throws Exception {
createIndexWithRandomDocs("test-index", randomIntBetween(1, 42));
createSnapshot("test-repo", "test-snapshot", List.of("test-index"));
- final MockLogAppender mockLogAppender = new MockLogAppender();
-
- try (var ignored = mockLogAppender.capturing(SnapshotsService.class)) {
-
+ try (var mockLogAppender = MockLogAppender.capture(SnapshotsService.class)) {
mockLogAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"[test-snapshot]",
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index f1232d2442c8b..3fc1af3f4c3c7 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -165,6 +165,10 @@ static TransportVersion def(int id) {
public static final TransportVersion JOIN_STATUS_AGE_SERIALIZATION = def(8_656_00_0);
public static final TransportVersion ML_RERANK_DOC_OPTIONAL = def(8_657_00_0);
public static final TransportVersion FAILURE_STORE_FIELD_PARITY = def(8_658_00_0);
+ public static final TransportVersion ML_INFERENCE_AZURE_AI_STUDIO = def(8_659_00_0);
+ public static final TransportVersion ML_INFERENCE_COHERE_COMPLETION_ADDED = def(8_660_00_0);
+ public static final TransportVersion ESQL_REMOVE_ES_SOURCE_OPTIONS = def(8_661_00_0);
+
/*
* STOP! READ THIS FIRST! No, really,
* ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _
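
For reviewers of the three new constants: the id layout, as this file's own comment block describes it, is M_NNN_SS_P, so each addition bumps the server part by one. Decomposed for the first of them (a reading aid, not new code):

-------------------------------------------------
// 8_659_00_0 (ML_INFERENCE_AZURE_AI_STUDIO)
//   M   = 8    major version of Elasticsearch
//   NNN = 659  incrementing server version part
//   SS  = 00   serverless part (always 00 in this repository)
//   P   = 0    patch part, used for backports
-------------------------------------------------
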
diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index ab93f98c5648b..f08dde7c5ba94 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -807,6 +807,7 @@ private static ActionFilters setupActionFilters(List<ActionPlugin> actionPlugins
finalFilters.add(filter);
}
}
+ mappedFilters.addAll(plugin.getMappedActionFilters());
}
if (mappedFilters.isEmpty() == false) {
finalFilters.add(new MappedActionFilters(mappedFilters));
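
The added line closes a real gap: without it, filters returned from ActionPlugin#getMappedActionFilters were never collected here. A hypothetical plugin sketch (MyPlugin and MyFilter are illustrative names only):

-------------------------------------------------
public class MyPlugin extends Plugin implements ActionPlugin {
    @Override
    public Collection<MappedActionFilter> getMappedActionFilters() {
        // each mapped filter names the single action it applies to;
        // ActionModule now collects these and wraps them in one MappedActionFilters
        return List.of(new MyFilter());
    }
}
-------------------------------------------------
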
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
index 5d20443fa3989..c2fd49eb91a42 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
@@ -15,6 +15,7 @@
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeRequest;
+import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -65,7 +66,9 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotRequest>
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java
() -> null,
indexSettings.getMode().idFieldMapperWithoutFieldData(),
- scriptService
+ scriptService,
+ mapperMetrics
)
) {
mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_RECOVERY);
diff --git a/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java b/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java
index 4f1d131dd8ced..d03efe4a23670 100644
--- a/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java
+++ b/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java
@@ -427,7 +427,7 @@ private ZoneId parseZoneId(CharSequence str, int pos) {
pos++; // read the + or -
Integer hours = parseInt(str, pos, pos += 2);
- if (hours == null) return null;
+ if (hours == null || hours > 23) return null;
if (len == pos) return ofHoursMinutesSeconds(hours, 0, 0, positive);
boolean hasColon = false;
@@ -437,7 +437,7 @@ private ZoneId parseZoneId(CharSequence str, int pos) {
}
Integer minutes = parseInt(str, pos, pos += 2);
- if (minutes == null) return null;
+ if (minutes == null || minutes > 59) return null;
if (len == pos) return ofHoursMinutesSeconds(hours, minutes, 0, positive);
// either both dividers have a colon, or neither do
@@ -447,7 +447,7 @@ private ZoneId parseZoneId(CharSequence str, int pos) {
}
Integer seconds = parseInt(str, pos, pos += 2);
- if (seconds == null) return null;
+ if (seconds == null || seconds > 59) return null;
if (len == pos) return ofHoursMinutesSeconds(hours, minutes, seconds, positive);
// there's some text left over...
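
The three added range checks above tighten zone-offset parsing: a syntactically well-formed but out-of-range field now makes parseZoneId return null, surfacing as a parse failure rather than a nonsensical ZoneId. Illustrative inputs (examples, not from the test suite):

-------------------------------------------------
// "+05:30"    -> accepted: hours 5 <= 23, minutes 30 <= 59
// "+24:00"    -> rejected: hours 24 > 23
// "+10:75"    -> rejected: minutes 75 > 59
// "+10:30:99" -> rejected: seconds 99 > 59
-------------------------------------------------
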
diff --git a/server/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java b/server/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java
index 4dfb3d1f46e25..a1b8690e7ea66 100644
--- a/server/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java
+++ b/server/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java
@@ -19,7 +19,7 @@
/** Common implementation for array lists that slice data into fixed-size blocks. */
abstract class AbstractBigArray extends AbstractArray {
- private final PageCacheRecycler recycler;
+ protected final PageCacheRecycler recycler;
private Recycler.V<?>[] cache;
private final int pageShift;
@@ -93,7 +93,7 @@ private static <T> T[] grow(T[] array, int minSize) {
return array;
}
- private <T> T registerNewPage(Recycler.V<T> v, int page, int expectedSize) {
+ protected <T> T registerNewPage(Recycler.V<T> v, int page, int expectedSize) {
cache = grow(cache, page + 1);
assert cache[page] == null;
cache[page] = v;
@@ -101,24 +101,6 @@ private <T> T registerNewPage(Recycler.V<T> v, int page, int expectedSize) {
return v.v();
}
- protected final byte[] newBytePage(int page) {
- if (recycler != null) {
- final Recycler.V<byte[]> v = recycler.bytePage(clearOnResize);
- return registerNewPage(v, page, PageCacheRecycler.BYTE_PAGE_SIZE);
- } else {
- return new byte[PageCacheRecycler.BYTE_PAGE_SIZE];
- }
- }
-
- protected final Object[] newObjectPage(int page) {
- if (recycler != null) {
- final Recycler.V<Object[]> v = recycler.objectPage();
- return registerNewPage(v, page, PageCacheRecycler.OBJECT_PAGE_SIZE);
- } else {
- return new Object[PageCacheRecycler.OBJECT_PAGE_SIZE];
- }
- }
-
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java
/**
* {@inheritDoc}
*
* The default implementation returns the result of calling
* {@link #visitChildren} on {@code ctx}.
*/
@Override public T visitIndexIdentifier(EsqlBaseParser.IndexIdentifierContext ctx) { return visitChildren(ctx); }
- /**
- * {@inheritDoc}
- *
- * The default implementation returns the result of calling
- * {@link #visitChildren} on {@code ctx}.
- */
- @Override public T visitFromOptions(EsqlBaseParser.FromOptionsContext ctx) { return visitChildren(ctx); }
- /**
- * {@inheritDoc}
- *
- * The default implementation returns the result of calling
- * {@link #visitChildren} on {@code ctx}.
- */
- @Override public T visitConfigOption(EsqlBaseParser.ConfigOptionContext ctx) { return visitChildren(ctx); }
/**
* {@inheritDoc}
*
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java
index ac4047ffbd22f..978ac68670752 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java
@@ -345,26 +345,6 @@ public interface EsqlBaseParserListener extends ParseTreeListener {
* @param ctx the parse tree
*/
void exitIndexIdentifier(EsqlBaseParser.IndexIdentifierContext ctx);
- /**
- * Enter a parse tree produced by {@link EsqlBaseParser#fromOptions}.
- * @param ctx the parse tree
- */
- void enterFromOptions(EsqlBaseParser.FromOptionsContext ctx);
- /**
- * Exit a parse tree produced by {@link EsqlBaseParser#fromOptions}.
- * @param ctx the parse tree
- */
- void exitFromOptions(EsqlBaseParser.FromOptionsContext ctx);
- /**
- * Enter a parse tree produced by {@link EsqlBaseParser#configOption}.
- * @param ctx the parse tree
- */
- void enterConfigOption(EsqlBaseParser.ConfigOptionContext ctx);
- /**
- * Exit a parse tree produced by {@link EsqlBaseParser#configOption}.
- * @param ctx the parse tree
- */
- void exitConfigOption(EsqlBaseParser.ConfigOptionContext ctx);
/**
* Enter a parse tree produced by {@link EsqlBaseParser#metadata}.
* @param ctx the parse tree
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java
index 37b94cd585c11..bd24afcd28c4a 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java
@@ -209,18 +209,6 @@ public interface EsqlBaseParserVisitor<T> extends ParseTreeVisitor<T> {
* @return the visitor result
*/
T visitIndexIdentifier(EsqlBaseParser.IndexIdentifierContext ctx);
- /**
- * Visit a parse tree produced by {@link EsqlBaseParser#fromOptions}.
- * @param ctx the parse tree
- * @return the visitor result
- */
- T visitFromOptions(EsqlBaseParser.FromOptionsContext ctx);
- /**
- * Visit a parse tree produced by {@link EsqlBaseParser#configOption}.
- * @param ctx the parse tree
- * @return the visitor result
- */
- T visitConfigOption(EsqlBaseParser.ConfigOptionContext ctx);
/**
* Visit a parse tree produced by {@link EsqlBaseParser#metadata}.
* @param ctx the parse tree
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java
index b8fc29e4ef64d..1365f1698176f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java
@@ -47,7 +47,6 @@
import org.elasticsearch.xpack.ql.expression.UnresolvedAttribute;
import org.elasticsearch.xpack.ql.expression.UnresolvedStar;
import org.elasticsearch.xpack.ql.expression.function.UnresolvedFunction;
-import org.elasticsearch.xpack.ql.options.EsSourceOptions;
import org.elasticsearch.xpack.ql.parser.ParserUtils;
import org.elasticsearch.xpack.ql.plan.TableIdentifier;
import org.elasticsearch.xpack.ql.plan.logical.Filter;
@@ -235,21 +234,7 @@ public LogicalPlan visitFromCommand(EsqlBaseParser.FromCommandContext ctx) {
}
}
}
- EsSourceOptions esSourceOptions = new EsSourceOptions();
- if (ctx.fromOptions() != null) {
- for (var o : ctx.fromOptions().configOption()) {
- var nameContext = o.string().get(0);
- String name = visitString(nameContext).fold().toString();
- String value = visitString(o.string().get(1)).fold().toString();
- try {
- esSourceOptions.addOption(name, value);
- } catch (IllegalArgumentException iae) {
- var cause = iae.getCause() != null ? ". " + iae.getCause().getMessage() : "";
- throw new ParsingException(iae, source(nameContext), "invalid options provided: " + iae.getMessage() + cause);
- }
- }
- }
- return new EsqlUnresolvedRelation(source, table, Arrays.asList(metadataMap.values().toArray(Attribute[]::new)), esSourceOptions);
+ return new EsqlUnresolvedRelation(source, table, Arrays.asList(metadataMap.values().toArray(Attribute[]::new)));
}
@Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java
index 19c3d9cf52109..52535beec2bfa 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java
@@ -9,7 +9,6 @@
import org.elasticsearch.xpack.ql.expression.Attribute;
import org.elasticsearch.xpack.ql.expression.FieldAttribute;
import org.elasticsearch.xpack.ql.index.EsIndex;
-import org.elasticsearch.xpack.ql.options.EsSourceOptions;
import org.elasticsearch.xpack.ql.plan.logical.LeafPlan;
import org.elasticsearch.xpack.ql.tree.NodeInfo;
import org.elasticsearch.xpack.ql.tree.NodeUtils;
@@ -26,33 +25,26 @@ public class EsRelation extends LeafPlan {
private final EsIndex index;
private final List<Attribute> attrs;
- private final EsSourceOptions esSourceOptions;
private final boolean frozen;
public EsRelation(Source source, EsIndex index, boolean frozen) {
- this(source, index, flatten(source, index.mapping()), EsSourceOptions.NO_OPTIONS, frozen);
+ this(source, index, flatten(source, index.mapping()), frozen);
}
public EsRelation(Source source, EsIndex index, List<Attribute> attributes) {
- this(source, index, attributes, EsSourceOptions.NO_OPTIONS, false);
+ this(source, index, attributes, false);
}
- public EsRelation(Source source, EsIndex index, List<Attribute> attributes, EsSourceOptions esSourceOptions) {
- this(source, index, attributes, esSourceOptions, false);
- }
-
- public EsRelation(Source source, EsIndex index, List<Attribute> attributes, EsSourceOptions esSourceOptions, boolean frozen) {
+ public EsRelation(Source source, EsIndex index, List<Attribute> attributes, boolean frozen) {
super(source);
this.index = index;
this.attrs = attributes;
- Objects.requireNonNull(esSourceOptions);
- this.esSourceOptions = esSourceOptions;
this.frozen = frozen;
}
@Override
protected NodeInfo<EsRelation> info() {
- return NodeInfo.create(this, EsRelation::new, index, attrs, esSourceOptions, frozen);
+ return NodeInfo.create(this, EsRelation::new, index, attrs, frozen);
}
private static List<Attribute> flatten(Source source, Map<String, EsField> mapping) {
@@ -82,10 +74,6 @@ public EsIndex index() {
return index;
}
- public EsSourceOptions esSourceOptions() {
- return esSourceOptions;
- }
-
public boolean frozen() {
return frozen;
}
@@ -102,7 +90,7 @@ public boolean expressionsResolved() {
@Override
public int hashCode() {
- return Objects.hash(index, esSourceOptions, frozen);
+ return Objects.hash(index, frozen);
}
@Override
@@ -116,7 +104,7 @@ public boolean equals(Object obj) {
}
EsRelation other = (EsRelation) obj;
- return Objects.equals(index, other.index) && Objects.equals(esSourceOptions, other.esSourceOptions) && frozen == other.frozen;
+ return Objects.equals(index, other.index) && frozen == other.frozen;
}
@Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlUnresolvedRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlUnresolvedRelation.java
index 6eb5926f8b5c9..2b91ab61e43be 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlUnresolvedRelation.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlUnresolvedRelation.java
@@ -8,54 +8,31 @@
package org.elasticsearch.xpack.esql.plan.logical;
import org.elasticsearch.xpack.ql.expression.Attribute;
-import org.elasticsearch.xpack.ql.options.EsSourceOptions;
import org.elasticsearch.xpack.ql.plan.TableIdentifier;
import org.elasticsearch.xpack.ql.tree.NodeInfo;
import org.elasticsearch.xpack.ql.tree.Source;
import java.util.List;
-import java.util.Objects;
public class EsqlUnresolvedRelation extends UnresolvedRelation {
private final List<Attribute> metadataFields;
- private final EsSourceOptions esSourceOptions;
-
- public EsqlUnresolvedRelation(
- Source source,
- TableIdentifier table,
- List<Attribute> metadataFields,
- EsSourceOptions esSourceOptions,
- String unresolvedMessage
- ) {
- super(source, table, "", false, unresolvedMessage);
- this.metadataFields = metadataFields;
- Objects.requireNonNull(esSourceOptions);
- this.esSourceOptions = esSourceOptions;
- }
public EsqlUnresolvedRelation(Source source, TableIdentifier table, List<Attribute> metadataFields, String unresolvedMessage) {
- this(source, table, metadataFields, EsSourceOptions.NO_OPTIONS, unresolvedMessage);
- }
-
- public EsqlUnresolvedRelation(Source source, TableIdentifier table, List<Attribute> metadataFields, EsSourceOptions esSourceOptions) {
- this(source, table, metadataFields, esSourceOptions, null);
+ super(source, table, "", false, unresolvedMessage);
+ this.metadataFields = metadataFields;
}
public EsqlUnresolvedRelation(Source source, TableIdentifier table, List<Attribute> metadataFields) {
- this(source, table, metadataFields, EsSourceOptions.NO_OPTIONS, null);
+ this(source, table, metadataFields, null);
}
public List<Attribute> metadataFields() {
return metadataFields;
}
- public EsSourceOptions esSourceOptions() {
- return esSourceOptions;
- }
-
@Override
protected NodeInfo<EsqlUnresolvedRelation> info() {
- return NodeInfo.create(this, EsqlUnresolvedRelation::new, table(), metadataFields(), esSourceOptions(), unresolvedMessage());
+ return NodeInfo.create(this, EsqlUnresolvedRelation::new, table(), metadataFields(), unresolvedMessage());
}
}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java
index fbfc57261bc40..6110ed1b72c28 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java
@@ -43,7 +43,6 @@
import org.elasticsearch.xpack.ql.expression.Expression;
import org.elasticsearch.xpack.ql.expression.FieldAttribute;
import org.elasticsearch.xpack.ql.expression.predicate.Predicates;
-import org.elasticsearch.xpack.ql.options.EsSourceOptions;
import org.elasticsearch.xpack.ql.plan.logical.Filter;
import org.elasticsearch.xpack.ql.plan.logical.Limit;
import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan;
@@ -218,12 +217,6 @@ static QueryBuilder detectFilter(PhysicalPlan plan, String fieldName, Predicate<Expression>
return Queries.combine(FILTER, asList(requestFilter));
}
- public static EsSourceOptions esSourceOptions(PhysicalPlan plan) {
- Holder<EsSourceOptions> holder = new Holder<>();
- plan.forEachUp(FragmentExec.class, f -> f.fragment().forEachUp(EsRelation.class, r -> holder.set(r.esSourceOptions())));
- return holder.get();
- }
-
/**
* Map QL's {@link DataType} to the compute engine's {@link ElementType}, for sortable types only.
* This specifically excludes spatial data types, which are not themselves sortable.
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java
index d9005d5997b34..1632d8b8bf950 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java
@@ -72,7 +72,6 @@
import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
import org.elasticsearch.xpack.esql.planner.PlannerUtils;
import org.elasticsearch.xpack.esql.session.EsqlConfiguration;
-import org.elasticsearch.xpack.ql.options.EsSourceOptions;
import java.util.ArrayList;
import java.util.Collections;
@@ -304,51 +303,42 @@ private void startComputeOnDataNodes(
// Since it's used only for @timestamp, it is relatively safe to assume it's not needed
// but it would be better to have a proper impl.
QueryBuilder requestFilter = PlannerUtils.requestFilter(planWithReducer, x -> true);
- EsSourceOptions esSourceOptions = PlannerUtils.esSourceOptions(planWithReducer);
- lookupDataNodes(
- parentTask,
- clusterAlias,
- requestFilter,
- concreteIndices,
- originalIndices,
- esSourceOptions,
- ActionListener.wrap(dataNodes -> {
- try (RefCountingRunnable refs = new RefCountingRunnable(() -> parentListener.onResponse(null))) {
- // For each target node, first open a remote exchange on the remote node, then link the exchange source to
- // the new remote exchange sink, and initialize the computation on the target node via data-node-request.
- for (DataNode node : dataNodes) {
- var dataNodeListener = ActionListener.releaseAfter(dataNodeListenerSupplier.get(), refs.acquire());
- var queryPragmas = configuration.pragmas();
- ExchangeService.openExchange(
- transportService,
- node.connection,
- sessionId,
- queryPragmas.exchangeBufferSize(),
- esqlExecutor,
- dataNodeListener.delegateFailureAndWrap((delegate, unused) -> {
- var remoteSink = exchangeService.newRemoteSink(parentTask, sessionId, transportService, node.connection);
- exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients());
- transportService.sendChildRequest(
- node.connection,
- DATA_ACTION_NAME,
- new DataNodeRequest(
- sessionId,
- configuration,
- clusterAlias,
- node.shardIds,
- node.aliasFilters,
- planWithReducer
- ),
- parentTask,
- TransportRequestOptions.EMPTY,
- new ActionListenerResponseHandler<>(delegate, ComputeResponse::new, esqlExecutor)
- );
- })
- );
- }
+ lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices, ActionListener.wrap(dataNodes -> {
+ try (RefCountingRunnable refs = new RefCountingRunnable(() -> parentListener.onResponse(null))) {
+ // For each target node, first open a remote exchange on the remote node, then link the exchange source to
+ // the new remote exchange sink, and initialize the computation on the target node via data-node-request.
+ for (DataNode node : dataNodes) {
+ var dataNodeListener = ActionListener.releaseAfter(dataNodeListenerSupplier.get(), refs.acquire());
+ var queryPragmas = configuration.pragmas();
+ ExchangeService.openExchange(
+ transportService,
+ node.connection,
+ sessionId,
+ queryPragmas.exchangeBufferSize(),
+ esqlExecutor,
+ dataNodeListener.delegateFailureAndWrap((delegate, unused) -> {
+ var remoteSink = exchangeService.newRemoteSink(parentTask, sessionId, transportService, node.connection);
+ exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients());
+ transportService.sendChildRequest(
+ node.connection,
+ DATA_ACTION_NAME,
+ new DataNodeRequest(
+ sessionId,
+ configuration,
+ clusterAlias,
+ node.shardIds,
+ node.aliasFilters,
+ planWithReducer
+ ),
+ parentTask,
+ TransportRequestOptions.EMPTY,
+ new ActionListenerResponseHandler<>(delegate, ComputeResponse::new, esqlExecutor)
+ );
+ })
+ );
}
- }, parentListener::onFailure)
- );
+ }
+ }, parentListener::onFailure));
}
private void startComputeOnRemoteClusters(
@@ -554,7 +544,6 @@ private void lookupDataNodes(
QueryBuilder filter,
Set<String> concreteIndices,
String[] originalIndices,
- EsSourceOptions esSourceOptions,
ActionListener<List<DataNode>> listener
) {
ThreadContext threadContext = transportService.getThreadPool().getThreadContext();
@@ -598,10 +587,10 @@ private void lookupDataNodes(
threadContext.markAsSystemContext();
SearchShardsRequest searchShardsRequest = new SearchShardsRequest(
originalIndices,
- esSourceOptions.indicesOptions(SearchRequest.DEFAULT_INDICES_OPTIONS),
+ SearchRequest.DEFAULT_INDICES_OPTIONS,
filter,
null,
- esSourceOptions.preference(),
+ null,
false,
clusterAlias
);
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java
index cf311d4413671..cc26cff9deac7 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java
@@ -111,6 +111,7 @@ public class EsqlFeatures implements FeatureSpecification {
/**
* Does ESQL support FROM OPTIONS?
*/
+ @Deprecated
public static final NodeFeature FROM_OPTIONS = new NodeFeature("esql.from_options");
/**
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlIndexResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlIndexResolver.java
index ad9902a91d002..b573de7cc3435 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlIndexResolver.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlIndexResolver.java
@@ -11,13 +11,13 @@
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.fieldcaps.IndexFieldCapabilities;
-import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.mapper.TimeSeriesParams;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ql.index.EsIndex;
import org.elasticsearch.xpack.ql.index.IndexResolution;
+import org.elasticsearch.xpack.ql.index.IndexResolver;
import org.elasticsearch.xpack.ql.type.DataType;
import org.elasticsearch.xpack.ql.type.DataTypeRegistry;
import org.elasticsearch.xpack.ql.type.DateEsField;
@@ -55,14 +55,9 @@ public EsqlIndexResolver(Client client, DataTypeRegistry typeRegistry) {
/**
* Resolves a pattern to one (potentially compound, meaning it spans multiple indices) mapping.
*/
- public void resolveAsMergedMapping(
- String indexWildcard,
- Set<String> fieldNames,
- IndicesOptions indicesOptions,
- ActionListener<IndexResolution> listener
- ) {
+ public void resolveAsMergedMapping(String indexWildcard, Set<String> fieldNames, ActionListener<IndexResolution> listener) {
client.fieldCaps(
- createFieldCapsRequest(indexWildcard, fieldNames, indicesOptions),
+ createFieldCapsRequest(indexWildcard, fieldNames),
listener.delegateFailureAndWrap((l, response) -> l.onResponse(mergedMappings(indexWildcard, response)))
);
}
@@ -244,13 +239,13 @@ private EsField conflictingMetricTypes(String name, String fullName, FieldCapabi
return new InvalidMappedField(name, "mapped as different metric types in indices: " + indices);
}
- private static FieldCapabilitiesRequest createFieldCapsRequest(String index, Set<String> fieldNames, IndicesOptions indicesOptions) {
+ private static FieldCapabilitiesRequest createFieldCapsRequest(String index, Set<String> fieldNames) {
FieldCapabilitiesRequest req = new FieldCapabilitiesRequest().indices(Strings.commaDelimitedListToStringArray(index));
req.fields(fieldNames.toArray(String[]::new));
req.includeUnmapped(true);
// lenient because we throw our own errors looking at the response e.g. if something was not resolved
// also because this way security doesn't throw authorization exceptions but rather honors ignore_unavailable
- req.indicesOptions(indicesOptions);
+ req.indicesOptions(IndexResolver.FIELD_CAPS_INDICES_OPTIONS);
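+ // keep per-index responses: merging happens locally in mergedMappings(), not in the field_caps API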
req.setMergeResults(false);
return req;
}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java
index 94d559137f463..1abe994cb75c2 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java
@@ -9,7 +9,6 @@
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
-import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.core.Assertions;
@@ -208,13 +207,11 @@ private void preAnalyzeIndices(LogicalPlan parsed, ActionListener<IndexResolution> listener
String indexWildcard,
Set<String> fieldNames,
- IndicesOptions indicesOptions,
ActionListener<IndexResolution> listener
) {
indexResolver.resolveAsMergedMapping(indexWildcard, fieldNames, false, Map.of(), new ActionListener<>() {
@Override
public void onResponse(IndexResolution fromQl) {
- esqlIndexResolver.resolveAsMergedMapping(indexWildcard, fieldNames, indicesOptions, new ActionListener<>() {
+ esqlIndexResolver.resolveAsMergedMapping(indexWildcard, fieldNames, new ActionListener<>() {
@Override
public void onResponse(IndexResolution fromEsql) {
if (fromQl.isValid() == false) {
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CbrtTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CbrtTests.java
new file mode 100644
index 0000000000000..3d67b4d2b1efe
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CbrtTests.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.math;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.elasticsearch.xpack.ql.util.NumericUtils;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble;
+
+public class CbrtTests extends AbstractFunctionTestCase {
+ public CbrtTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
+ this.testCase = testCaseSupplier.get();
+ }
+
+ @ParametersFactory
+ public static Iterable<Object[]> parameters() {
+ String read = "Attribute[channel=0]";
+ List<TestCaseSupplier> suppliers = new ArrayList<>();
+ // Valid values
+ TestCaseSupplier.forUnaryInt(
+ suppliers,
+ "CbrtIntEvaluator[val=" + read + "]",
+ DataTypes.DOUBLE,
+ Math::cbrt,
+ Integer.MIN_VALUE,
+ Integer.MAX_VALUE,
+ List.of()
+ );
+ TestCaseSupplier.forUnaryLong(
+ suppliers,
+ "CbrtLongEvaluator[val=" + read + "]",
+ DataTypes.DOUBLE,
+ Math::cbrt,
+ Long.MIN_VALUE,
+ Long.MAX_VALUE,
+ List.of()
+ );
+ TestCaseSupplier.forUnaryUnsignedLong(
+ suppliers,
+ "CbrtUnsignedLongEvaluator[val=" + read + "]",
+ DataTypes.DOUBLE,
+ ul -> Math.cbrt(unsignedLongToDouble(NumericUtils.asLongUnsigned(ul))),
+ BigInteger.ZERO,
+ UNSIGNED_LONG_MAX,
+ List.of()
+ );
+ TestCaseSupplier.forUnaryDouble(
+ suppliers,
+ "CbrtDoubleEvaluator[val=" + read + "]",
+ DataTypes.DOUBLE,
+ Math::cbrt,
+ Double.MIN_VALUE,
+ Double.MAX_VALUE,
+ List.of()
+ );
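+ // Extend the suppliers above with null-input variants and type-error cases before building the parameters.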
+ suppliers = anyNullIsNull(true, suppliers);
+
+ return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(suppliers));
+ }
+
+ @Override
+ protected Expression build(Source source, List<Expression> args) {
+ return new Cbrt(source, args.get(0));
+ }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
index cfa3b4a8ea6ae..8fbce3302b25f 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
@@ -88,7 +88,6 @@
import org.elasticsearch.xpack.ql.expression.function.Function;
import org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.ArithmeticOperation;
import org.elasticsearch.xpack.ql.index.EsIndex;
-import org.elasticsearch.xpack.ql.options.EsSourceOptions;
import org.elasticsearch.xpack.ql.plan.logical.Filter;
import org.elasticsearch.xpack.ql.plan.logical.Limit;
import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan;
@@ -464,7 +463,7 @@ public void testDissectParserSimple() throws IOException {
}
public void testEsRelation() throws IOException {
- var orig = new EsRelation(Source.EMPTY, randomEsIndex(), List.of(randomFieldAttribute()), randomEsSourceOptions(), randomBoolean());
+ var orig = new EsRelation(Source.EMPTY, randomEsIndex(), List.of(randomFieldAttribute()), randomBoolean());
BytesStreamOutput bso = new BytesStreamOutput();
PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null);
PlanNamedTypes.writeEsRelation(out, orig);
@@ -475,7 +474,7 @@ public void testEsRelation() throws IOException {
public void testEsqlProject() throws IOException {
var orig = new EsqlProject(
Source.EMPTY,
- new EsRelation(Source.EMPTY, randomEsIndex(), List.of(randomFieldAttribute()), randomEsSourceOptions(), randomBoolean()),
+ new EsRelation(Source.EMPTY, randomEsIndex(), List.of(randomFieldAttribute()), randomBoolean()),
List.of(randomFieldAttribute())
);
BytesStreamOutput bso = new BytesStreamOutput();
@@ -486,13 +485,7 @@ public void testEsqlProject() throws IOException {
}
public void testMvExpand() throws IOException {
- var esRelation = new EsRelation(
- Source.EMPTY,
- randomEsIndex(),
- List.of(randomFieldAttribute()),
- randomEsSourceOptions(),
- randomBoolean()
- );
+ var esRelation = new EsRelation(Source.EMPTY, randomEsIndex(), List.of(randomFieldAttribute()), randomBoolean());
var orig = new MvExpand(Source.EMPTY, esRelation, randomFieldAttribute(), randomFieldAttribute());
BytesStreamOutput bso = new BytesStreamOutput();
PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null);
@@ -684,31 +677,6 @@ static Map<String, EsField> randomProperties(int depth) {
return Map.copyOf(map);
}
- static EsSourceOptions randomEsSourceOptions() {
- EsSourceOptions eso = new EsSourceOptions();
- if (randomBoolean()) {
- eso.addOption("allow_no_indices", String.valueOf(randomBoolean()));
- }
- if (randomBoolean()) {
- eso.addOption("ignore_unavailable", String.valueOf(randomBoolean()));
- }
- if (randomBoolean()) {
- String idsList = String.join(",", randomList(1, 5, PlanNamedTypesTests::randomName));
- eso.addOption(
- "preference",
- randomFrom(
- "_only_local",
- "_local",
- "_only_nodes:" + idsList,
- "_prefer_nodes:" + idsList,
- "_shards:" + idsList,
- randomName()
- )
- );
- }
- return eso;
- }
-
static List<DataType> DATA_TYPES = EsqlDataTypes.types().stream().toList();
static DataType randomDataType() {
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java
index ddd53cad8ec6d..633e0479b11d5 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java
@@ -9,8 +9,6 @@
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Build;
-import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.common.Randomness;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.esql.VerificationException;
@@ -594,7 +592,7 @@ public void testMetadataFieldOnOtherSources() {
expectError("show info metadata _index", "line 1:11: token recognition error at: 'm'");
expectError(
"explain [from foo] metadata _index",
- "line 1:20: mismatched input 'metadata' expecting {'|', ',', OPENING_BRACKET, ']', 'options', 'metadata'}"
+ "line 1:20: mismatched input 'metadata' expecting {'|', ',', OPENING_BRACKET, ']', 'metadata'}"
);
}
@@ -618,106 +616,6 @@ public void testMetadataFieldNotFoundNormalField() {
expectError("from test metadata emp_no", "line 1:21: unsupported metadata field [emp_no]");
}
- public void testFromOptionsUnknownName() {
- expectError(FROM + " options \"foo\"=\"oof\",\"bar\"=\"rab\"", "line 1:20: invalid options provided: unknown option named [foo]");
- }
-
- public void testFromOptionsPartialInvalid() {
- expectError(
- FROM + " options \"allow_no_indices\"=\"true\",\"bar\"=\"rab\"",
- "line 1:46: invalid options provided: unknown option named [bar]"
- );
- }
-
- public void testFromOptionsInvalidIndicesOptionValue() {
- expectError(
- FROM + " options \"allow_no_indices\"=\"foo\"",
- "line 1:20: invalid options provided: Could not convert [allow_no_indices] to boolean"
- );
- }
-
- public void testFromOptionsEmptyIndicesOptionName() {
- expectError(FROM + " options \"\"=\"true\"", "line 1:20: invalid options provided: unknown option named []");
- }
-
- public void testFromOptionsEmptyIndicesOptionValue() {
- expectError(
- FROM + " options \"allow_no_indices\"=\"\"",
- "line 1:20: invalid options provided: Could not convert [allow_no_indices] to boolean. "
- + "Failed to parse value [] as only [true] or [false] are allowed."
- );
- expectError(
- FROM + " options \"ignore_unavailable\"=\"TRUE\"",
- "line 1:20: invalid options provided: Could not convert [ignore_unavailable] to boolean. "
- + "Failed to parse value [TRUE] as only [true] or [false] are allowed."
- );
- expectError(FROM + " options \"preference\"=\"\"", "line 1:20: invalid options provided: no Preference for []");
- }
-
- public void testFromOptionsSuggestedOptionName() {
- expectError(
- FROM + " options \"allow_indices\"=\"true\"",
- "line 1:20: invalid options provided: unknown option named [allow_indices], did you mean [allow_no_indices]?"
- );
- }
-
- public void testFromOptionsInvalidPreferValue() {
- expectError(FROM + " options \"preference\"=\"_foo\"", "line 1:20: invalid options provided: no Preference for [_foo]");
- }
-
- public void testFromOptionsUnquotedName() {
- expectError(FROM + " options allow_no_indices=\"oof\"", "line 1:19: mismatched input 'allow_no_indices' expecting QUOTED_STRING");
- }
-
- public void testFromOptionsUnquotedValue() {
- expectError(FROM + " options \"allow_no_indices\"=oof", "line 1:38: mismatched input 'oof' expecting QUOTED_STRING");
- }
-
- public void testFromOptionsDuplicates() {
- for (var name : List.of("allow_no_indices", "ignore_unavailable", "preference")) {
- String options = '"' + name + "\"=\"false\"";
- options += ',' + options;
- expectError(FROM + " options " + options, "invalid options provided: option [" + name + "] has already been provided");
- }
- }
-
- public void testFromOptionsValues() {
- boolean allowNoIndices = randomBoolean();
- boolean ignoreUnavailable = randomBoolean();
- String idsList = String.join(",", randomList(1, 5, () -> randomAlphaOfLengthBetween(1, 25)));
- String preference = randomFrom(
- "_only_local",
- "_local",
- "_only_nodes:" + idsList,
- "_prefer_nodes:" + idsList,
- "_shards:" + idsList,
- randomAlphaOfLengthBetween(1, 25)
- );
- List<String> options = new ArrayList<>(3);
- options.add("\"allow_no_indices\"=\"" + allowNoIndices + "\"");
- options.add("\"ignore_unavailable\"=\"" + ignoreUnavailable + "\"");
- options.add("\"preference\"=\"" + preference + "\"");
- Randomness.shuffle(options);
- String optionsList = String.join(",", options);
-
- var plan = statement(FROM + " OPTIONS " + optionsList);
- var unresolved = as(plan, EsqlUnresolvedRelation.class);
- assertNotNull(unresolved.esSourceOptions());
- var indicesOptions = unresolved.esSourceOptions().indicesOptions(SearchRequest.DEFAULT_INDICES_OPTIONS);
- assertThat(indicesOptions.allowNoIndices(), is(allowNoIndices));
- assertThat(indicesOptions.ignoreUnavailable(), is(ignoreUnavailable));
- assertThat(unresolved.esSourceOptions().preference(), is(preference));
- }
-
- public void testFromOptionsWithMetadata() {
- var plan = statement(FROM + " METADATA _id OPTIONS \"preference\"=\"foo\"");
- var unresolved = as(plan, EsqlUnresolvedRelation.class);
- assertNotNull(unresolved.esSourceOptions());
- assertThat(unresolved.esSourceOptions().preference(), is("foo"));
- assertFalse(unresolved.metadataFields().isEmpty());
- assertThat(unresolved.metadataFields().get(0).qualifiedName(), is("_id"));
- }
-
public void testDissectPattern() {
LogicalPlan cmd = processingCommand("dissect a \"%{foo}\"");
assertEquals(Dissect.class, cmd.getClass());
diff --git a/x-pack/plugin/inference/qa/mixed-cluster/build.gradle b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle
new file mode 100644
index 0000000000000..1d5369468b054
--- /dev/null
+++ b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle
@@ -0,0 +1,37 @@
+import org.elasticsearch.gradle.Version
+import org.elasticsearch.gradle.VersionProperties
+import org.elasticsearch.gradle.util.GradleUtils
+import org.elasticsearch.gradle.internal.info.BuildParams
+import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask
+
+apply plugin: 'elasticsearch.internal-java-rest-test'
+apply plugin: 'elasticsearch.internal-test-artifact-base'
+apply plugin: 'elasticsearch.bwc-test'
+
+dependencies {
+ testImplementation project(path: ':x-pack:plugin:inference:qa:inference-service-tests')
+ compileOnly project(':x-pack:plugin:core')
+ javaRestTestImplementation(testArtifact(project(xpackModule('core'))))
+ javaRestTestImplementation project(path: xpackModule('inference'))
+ clusterPlugins project(
+ ':x-pack:plugin:inference:qa:test-service-plugin'
+ )
+}
+
+// inference is available in 8.11 or later
+def supportedVersion = bwcVersion -> {
+ return bwcVersion.onOrAfter(Version.fromString("8.11.0"));
+}
+
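+// Register one javaRestTest task per wire-compatible BWC version; the aggregate bwc task depends on each of them.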
+BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName ->
+ def javaRestTest = tasks.register("v${bwcVersion}#javaRestTest", StandaloneRestIntegTestTask) {
+ usesBwcDistribution(bwcVersion)
+ systemProperty("tests.old_cluster_version", bwcVersion)
+ maxParallelForks = 1
+ }
+
+ tasks.register(bwcTaskName(bwcVersion)) {
+ dependsOn javaRestTest
+ }
+}
+
diff --git a/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/BaseMixedTestCase.java b/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/BaseMixedTestCase.java
new file mode 100644
index 0000000000000..2c47578f466e3
--- /dev/null
+++ b/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/BaseMixedTestCase.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.qa.mixed;
+
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.inference.TaskType;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.http.MockWebServer;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.hamcrest.Matchers;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+public abstract class BaseMixedTestCase extends MixedClusterSpecTestCase {
+ protected static String getUrl(MockWebServer webServer) {
+ return Strings.format("http://%s:%s", webServer.getHostName(), webServer.getPort());
+ }
+
+ @Override
+ protected Settings restClientSettings() {
+ String token = ESRestTestCase.basicAuthHeaderValue("x_pack_rest_user", new SecureString("x-pack-test-password".toCharArray()));
+ return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
+ }
+
+ protected void delete(String inferenceId, TaskType taskType) throws IOException {
+ var request = new Request("DELETE", Strings.format("_inference/%s/%s", taskType, inferenceId));
+ var response = ESRestTestCase.client().performRequest(request);
+ ESRestTestCase.assertOK(response);
+ }
+
+ protected void delete(String inferenceId) throws IOException {
+ var request = new Request("DELETE", Strings.format("_inference/%s", inferenceId));
+ var response = ESRestTestCase.client().performRequest(request);
+ ESRestTestCase.assertOK(response);
+ }
+
+ protected Map<String, Object> getAll() throws IOException {
+ var request = new Request("GET", "_inference/_all");
+ var response = ESRestTestCase.client().performRequest(request);
+ ESRestTestCase.assertOK(response);
+ return ESRestTestCase.entityAsMap(response);
+ }
+
+ protected Map<String, Object> get(String inferenceId) throws IOException {
+ var endpoint = Strings.format("_inference/%s", inferenceId);
+ var request = new Request("GET", endpoint);
+ var response = ESRestTestCase.client().performRequest(request);
+ ESRestTestCase.assertOK(response);
+ return ESRestTestCase.entityAsMap(response);
+ }
+
+ protected Map<String, Object> get(TaskType taskType, String inferenceId) throws IOException {
+ var endpoint = Strings.format("_inference/%s/%s", taskType, inferenceId);
+ var request = new Request("GET", endpoint);
+ var response = ESRestTestCase.client().performRequest(request);
+ ESRestTestCase.assertOK(response);
+ return ESRestTestCase.entityAsMap(response);
+ }
+
+ protected Map<String, Object> inference(String inferenceId, TaskType taskType, String input) throws IOException {
+ var endpoint = Strings.format("_inference/%s/%s", taskType, inferenceId);
+ var request = new Request("POST", endpoint);
+ request.setJsonEntity("{\"input\": [" + '"' + input + '"' + "]}");
+
+ var response = ESRestTestCase.client().performRequest(request);
+ ESRestTestCase.assertOK(response);
+ return ESRestTestCase.entityAsMap(response);
+ }
+
+ protected Map<String, Object> rerank(String inferenceId, List<String> inputs, String query) throws IOException {
+ var endpoint = Strings.format("_inference/rerank/%s", inferenceId);
+ var request = new Request("POST", endpoint);
+
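+ // Build the JSON body manually: {"query":"...","input":["...", ...]}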
+ StringBuilder body = new StringBuilder("{").append("\"query\":\"").append(query).append("\",").append("\"input\":[");
+
+ for (int i = 0; i < inputs.size(); i++) {
+ body.append("\"").append(inputs.get(i)).append("\"");
+ if (i < inputs.size() - 1) {
+ body.append(",");
+ }
+ }
+
+ body.append("]}");
+ request.setJsonEntity(body.toString());
+
+ var response = ESRestTestCase.client().performRequest(request);
+ ESRestTestCase.assertOK(response);
+ return ESRestTestCase.entityAsMap(response);
+ }
+
+ protected void put(String inferenceId, String modelConfig, TaskType taskType) throws IOException {
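+ // error_trace asks the server to include a stack trace in any error response, which helps debug mixed-cluster failures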
+ String endpoint = Strings.format("_inference/%s/%s?error_trace", taskType, inferenceId);
+ var request = new Request("PUT", endpoint);
+ request.setJsonEntity(modelConfig);
+ var response = ESRestTestCase.client().performRequest(request);
+ logger.warn("PUT response: {}", response.toString());
+ ESRestTestCase.assertOKAndConsume(response);
+ }
+
+ protected static void assertOkOrCreated(Response response) throws IOException {
+ int statusCode = response.getStatusLine().getStatusCode();
+ // Once EntityUtils.toString(entity) is called the entity cannot be reused.
+ // Avoid that call when the status code already indicates success.
+ if (statusCode == 200 || statusCode == 201) {
+ return;
+ }
+
+ String responseStr = EntityUtils.toString(response.getEntity());
+ ESTestCase.assertThat(
+ responseStr,
+ response.getStatusLine().getStatusCode(),
+ Matchers.anyOf(Matchers.equalTo(200), Matchers.equalTo(201))
+ );
+ }
+}
diff --git a/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/CohereServiceMixedIT.java b/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/CohereServiceMixedIT.java
new file mode 100644
index 0000000000000..69274b46d75c1
--- /dev/null
+++ b/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/CohereServiceMixedIT.java
@@ -0,0 +1,271 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.qa.mixed;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.inference.TaskType;
+import org.elasticsearch.test.http.MockResponse;
+import org.elasticsearch.test.http.MockWebServer;
+import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingType;
+import org.hamcrest.Matchers;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.xpack.inference.qa.mixed.MixedClusterSpecTestCase.bwcVersion;
+import static org.hamcrest.Matchers.empty;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.oneOf;
+
+public class CohereServiceMixedIT extends BaseMixedTestCase {
+
+ private static final String COHERE_EMBEDDINGS_ADDED = "8.13.0";
+ private static final String COHERE_RERANK_ADDED = "8.14.0";
+ private static final String BYTE_ALIAS_FOR_INT8_ADDED = "8.14.0";
+ private static final String MINIMUM_SUPPORTED_VERSION = "8.15.0";
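+ // Each test first checks (assumeTrue) that the BWC version supports the feature under test, and is skipped otherwise.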
+
+ private static MockWebServer cohereEmbeddingsServer;
+ private static MockWebServer cohereRerankServer;
+
+ @BeforeClass
+ public static void startWebServer() throws IOException {
+ cohereEmbeddingsServer = new MockWebServer();
+ cohereEmbeddingsServer.start();
+
+ cohereRerankServer = new MockWebServer();
+ cohereRerankServer.start();
+ }
+
+ @AfterClass
+ public static void shutdown() {
+ cohereEmbeddingsServer.close();
+ cohereRerankServer.close();
+ }
+
+ @SuppressWarnings("unchecked")
+ public void testCohereEmbeddings() throws IOException {
+ var embeddingsSupported = bwcVersion.onOrAfter(Version.fromString(COHERE_EMBEDDINGS_ADDED));
+ assumeTrue("Cohere embedding service added in " + COHERE_EMBEDDINGS_ADDED, embeddingsSupported);
+ assumeTrue(
+ "Cohere service requires at least " + MINIMUM_SUPPORTED_VERSION,
+ bwcVersion.onOrAfter(Version.fromString(MINIMUM_SUPPORTED_VERSION))
+ );
+
+ final String inferenceIdInt8 = "mixed-cluster-cohere-embeddings-int8";
+ final String inferenceIdFloat = "mixed-cluster-cohere-embeddings-float";
+
+ // queue a response as PUT will call the service
+ cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseByte()));
+ put(inferenceIdInt8, embeddingConfigInt8(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING);
+
+ // float model
+ cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseFloat()));
+ put(inferenceIdFloat, embeddingConfigFloat(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING);
+
+ var configs = (List