Note that if {@link #detach()} has been called, this method is a no-op.
*/
- public synchronized void stop() {
+ public synchronized void stop() throws IOException {
if (detached) {
return;
}
@@ -93,7 +98,7 @@ public synchronized void stop() {
/**
* Stop the subprocess, sending a SIGKILL.
*/
- public void forceStop() {
+ public void forceStop() throws IOException {
assert detached == false;
jvmProcess.destroyForcibly();
waitFor();
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessBuilder.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessBuilder.java
index b90ac25f5d57d..fcc290ebe9e72 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessBuilder.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessBuilder.java
@@ -154,7 +154,7 @@ ServerProcess start(ProcessStarter processStarter) throws UserException {
boolean success = false;
try {
jvmProcess = createProcess(getCommand(), getJvmArgs(), jvmOptions, getEnvironment(), processStarter);
- errorPump = new ErrorPumpThread(terminal.getErrorWriter(), jvmProcess.getErrorStream());
+ errorPump = new ErrorPumpThread(terminal, jvmProcess.getErrorStream());
errorPump.start();
sendArgs(serverArgs, jvmProcess.getOutputStream());
diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java
index e469764590bd6..38a64a778fc27 100644
--- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java
+++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java
@@ -33,6 +33,7 @@
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.List;
import java.util.Locale;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -43,8 +44,11 @@
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.emptyString;
+import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.matchesRegex;
import static org.hamcrest.Matchers.not;
public class ServerCliTests extends CommandTestCase {
@@ -321,11 +325,16 @@ protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo,
throw new InterruptedException("interrupted while get jvm options");
}
};
- var e = expectThrows(
- InterruptedException.class,
- () -> command.main(new String[0], terminal, new ProcessInfo(sysprops, envVars, esHomeDir))
- );
- assertThat(e.getMessage(), equalTo("interrupted while get jvm options"));
+
+ int exitCode = command.main(new String[0], terminal, new ProcessInfo(sysprops, envVars, esHomeDir));
+ assertThat(exitCode, is(ExitCodes.CODE_ERROR));
+
+ String[] lines = terminal.getErrorOutput().split(System.lineSeparator());
+ assertThat(List.of(lines), hasSize(greaterThan(10))); // at least a decent-sized stacktrace
+ assertThat(lines[0], is("java.lang.InterruptedException: interrupted while get jvm options"));
+ assertThat(lines[1], matchesRegex("\\tat org.elasticsearch.server.cli.ServerCliTests.+startServer\\(ServerCliTests.java:\\d+\\)"));
+ assertThat(lines[lines.length - 1], matchesRegex("\\tat java.base/java.lang.Thread.run\\(Thread.java:\\d+\\)"));
+
command.close();
}
diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java
index b9f2eb73b30b5..dc36485fb77ab 100644
--- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java
+++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java
@@ -38,6 +38,7 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.CancellationException;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
@@ -393,15 +394,24 @@ public void testWaitFor() throws Exception {
stderr.println("final message");
};
var server = startProcess(false, false);
+
+ CompletableFuture<Void> stopping = new CompletableFuture<>();
new Thread(() -> {
- // simulate stop run as shutdown hook in another thread, eg from Ctrl-C
- nonInterruptibleVoid(mainReady::await);
- server.stop();
+ try {
+ // simulate stop run as a shutdown hook in another thread, e.g. from Ctrl-C
+ nonInterruptibleVoid(mainReady::await);
+ server.stop();
+ stopping.complete(null);
+ } catch (Throwable e) {
+ stopping.completeExceptionally(e);
+ }
}).start();
int exitCode = server.waitFor();
assertThat(process.main.isDone(), is(true));
assertThat(exitCode, equalTo(0));
assertThat(terminal.getErrorOutput(), containsString("final message"));
+ // rethrow any potential exception observed while stopping
+ stopping.get();
}
public void testProcessDies() throws Exception {
diff --git a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java
index 22474e63ab0df..66ae78470c55d 100644
--- a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java
+++ b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java
@@ -23,6 +23,8 @@
import org.elasticsearch.server.cli.ServerProcessBuilder;
import org.elasticsearch.server.cli.ServerProcessUtils;
+import java.io.IOException;
+
/**
* Starts an Elasticsearch process, but does not wait for it to exit.
*
@@ -55,7 +57,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce
}
@Override
- public void close() {
+ public void close() throws IOException {
if (server != null) {
server.stop();
}
diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java
index e4b651fcb77af..8f44eaa80f23a 100644
--- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java
+++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java
@@ -22,6 +22,8 @@
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyString;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.startsWith;
public class ProcrunCommandTests extends WindowsServiceCliTestCase {
@@ -111,8 +113,10 @@ protected String getDefaultFailureMessage() {
public void testMissingExe() throws Exception {
Files.delete(serviceExe);
- var e = expectThrows(IllegalStateException.class, () -> executeMain("install"));
- assertThat(e.getMessage(), containsString("Missing procrun exe"));
+ int exitCode = executeMain("install");
+
+ assertThat(exitCode, is(ExitCodes.CODE_ERROR));
+ assertThat(terminal.getErrorOutput(), startsWith("java.lang.IllegalStateException: Missing procrun exe"));
}
public void testServiceId() throws Exception {
diff --git a/docs/changelog/106820.yaml b/docs/changelog/106820.yaml
new file mode 100644
index 0000000000000..d854e3984c13d
--- /dev/null
+++ b/docs/changelog/106820.yaml
@@ -0,0 +1,5 @@
+pr: 106820
+summary: Add a capabilities API to check node and cluster capabilities
+area: Infra/REST API
+type: feature
+issues: []
diff --git a/docs/changelog/107088.yaml b/docs/changelog/107088.yaml
new file mode 100644
index 0000000000000..01a926f185eea
--- /dev/null
+++ b/docs/changelog/107088.yaml
@@ -0,0 +1,5 @@
+pr: 107088
+summary: Introduce role description field
+area: Authorization
+type: enhancement
+issues: []
diff --git a/docs/changelog/107876.yaml b/docs/changelog/107876.yaml
new file mode 100644
index 0000000000000..21624cacf7e1d
--- /dev/null
+++ b/docs/changelog/107876.yaml
@@ -0,0 +1,5 @@
+pr: 107876
+summary: "ESQL: Add aggregates node level reduction"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/107886.yaml b/docs/changelog/107886.yaml
deleted file mode 100644
index a328bc2a2a208..0000000000000
--- a/docs/changelog/107886.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107886
-summary: Cluster state role mapper file settings service
-area: Authorization
-type: enhancement
-issues: []
diff --git a/docs/changelog/107891.yaml b/docs/changelog/107891.yaml
new file mode 100644
index 0000000000000..deb3fbd2258ff
--- /dev/null
+++ b/docs/changelog/107891.yaml
@@ -0,0 +1,6 @@
+pr: 107891
+summary: Fix `startOffset` must be non-negative error in XLMRoBERTa tokenizer
+area: Machine Learning
+type: bug
+issues:
+ - 104626
diff --git a/docs/changelog/108088.yaml b/docs/changelog/108088.yaml
new file mode 100644
index 0000000000000..95c58f6dc19f1
--- /dev/null
+++ b/docs/changelog/108088.yaml
@@ -0,0 +1,5 @@
+pr: 108088
+summary: Add a SIMD (AVX2) optimised vector distance function for int7 on x64
+area: "Search"
+type: enhancement
+issues: []
diff --git a/docs/changelog/108238.yaml b/docs/changelog/108238.yaml
new file mode 100644
index 0000000000000..607979c2eb0ac
--- /dev/null
+++ b/docs/changelog/108238.yaml
@@ -0,0 +1,6 @@
+pr: 108238
+summary: "Nativeaccess: try to load all located libsystemds"
+area: Infra/Core
+type: bug
+issues:
+ - 107878
diff --git a/docs/changelog/108276.yaml b/docs/changelog/108276.yaml
deleted file mode 100644
index aaa78073f544e..0000000000000
--- a/docs/changelog/108276.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108276
-summary: Fix tsdb codec when doc-values spread in two blocks
-area: TSDB
-type: bug
-issues: []
diff --git a/docs/changelog/108280.yaml b/docs/changelog/108280.yaml
deleted file mode 100644
index b36a2f3769124..0000000000000
--- a/docs/changelog/108280.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108280
-summary: Ensure necessary security context for s3 bulk deletions
-area: Snapshot/Restore
-type: bug
-issues:
- - 108049
diff --git a/docs/changelog/108283.yaml b/docs/changelog/108283.yaml
deleted file mode 100644
index 6341a8775b729..0000000000000
--- a/docs/changelog/108283.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108283
-summary: Fix `BlockHash` `DirectEncoder`
-area: ES|QL
-type: bug
-issues:
- - 108268
diff --git a/docs/changelog/108300.yaml b/docs/changelog/108300.yaml
new file mode 100644
index 0000000000000..c4d6e468113a4
--- /dev/null
+++ b/docs/changelog/108300.yaml
@@ -0,0 +1,5 @@
+pr: 108300
+summary: "ESQL: Add more time span units"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/108333.yaml b/docs/changelog/108333.yaml
new file mode 100644
index 0000000000000..c3152500ce1b2
--- /dev/null
+++ b/docs/changelog/108333.yaml
@@ -0,0 +1,5 @@
+pr: 108333
+summary: Allow `read_slm` to call GET /_slm/status
+area: ILM+SLM
+type: bug
+issues: []
diff --git a/docs/changelog/108349.yaml b/docs/changelog/108349.yaml
new file mode 100644
index 0000000000000..6d9ea3d658dca
--- /dev/null
+++ b/docs/changelog/108349.yaml
@@ -0,0 +1,6 @@
+pr: 108349
+summary: "Ecs@mappings: reduce scope for `ecs_geo_point`"
+area: Data streams
+type: bug
+issues:
+ - 108338
diff --git a/docs/changelog/108365.yaml b/docs/changelog/108365.yaml
new file mode 100644
index 0000000000000..d94486e2f3ea7
--- /dev/null
+++ b/docs/changelog/108365.yaml
@@ -0,0 +1,5 @@
+pr: 108365
+summary: "[Bugfix] Connector API - fix status serialisation issue in termquery"
+area: Application
+type: bug
+issues: []
diff --git a/docs/changelog/108379.yaml b/docs/changelog/108379.yaml
new file mode 100644
index 0000000000000..312856a5db33d
--- /dev/null
+++ b/docs/changelog/108379.yaml
@@ -0,0 +1,5 @@
+pr: 108379
+summary: Create a new `NodeRequest` for every `NodesDataTiersUsageTransport` use
+area: Indices APIs
+type: bug
+issues: []
diff --git a/docs/changelog/108394.yaml b/docs/changelog/108394.yaml
new file mode 100644
index 0000000000000..58f48fa548c6e
--- /dev/null
+++ b/docs/changelog/108394.yaml
@@ -0,0 +1,6 @@
+pr: 108394
+summary: Handle `IndexNotFoundException`
+area: Transform
+type: bug
+issues:
+ - 107263
diff --git a/docs/changelog/108396.yaml b/docs/changelog/108396.yaml
new file mode 100644
index 0000000000000..63937646b755c
--- /dev/null
+++ b/docs/changelog/108396.yaml
@@ -0,0 +1,6 @@
+pr: 108396
+summary: "Apm-data: improve default pipeline performance"
+area: Data streams
+type: enhancement
+issues:
+ - 108290
diff --git a/docs/changelog/108410.yaml b/docs/changelog/108410.yaml
new file mode 100644
index 0000000000000..5fd831231a3be
--- /dev/null
+++ b/docs/changelog/108410.yaml
@@ -0,0 +1,5 @@
+pr: 108410
+summary: GeoIP tasks should wait longer for master
+area: Ingest Node
+type: bug
+issues: []
diff --git a/docs/changelog/108429.yaml b/docs/changelog/108429.yaml
new file mode 100644
index 0000000000000..562454a0de256
--- /dev/null
+++ b/docs/changelog/108429.yaml
@@ -0,0 +1,6 @@
+pr: 108429
+summary: Fix `ClassCastException` in Significant Terms
+area: Aggregations
+type: bug
+issues:
+ - 108427
diff --git a/docs/changelog/108431.yaml b/docs/changelog/108431.yaml
new file mode 100644
index 0000000000000..84607b1b99ac3
--- /dev/null
+++ b/docs/changelog/108431.yaml
@@ -0,0 +1,5 @@
+pr: 108431
+summary: "ESQL: Disable quoting in FROM command"
+area: ES|QL
+type: bug
+issues: []
diff --git a/docs/changelog/108444.yaml b/docs/changelog/108444.yaml
new file mode 100644
index 0000000000000..c946ab24f939a
--- /dev/null
+++ b/docs/changelog/108444.yaml
@@ -0,0 +1,5 @@
+pr: 108444
+summary: "Apm-data: ignore malformed fields, and too many dynamic fields"
+area: Data streams
+type: enhancement
+issues: []
diff --git a/docs/changelog/108459.yaml b/docs/changelog/108459.yaml
new file mode 100644
index 0000000000000..5e05797f284be
--- /dev/null
+++ b/docs/changelog/108459.yaml
@@ -0,0 +1,6 @@
+pr: 108459
+summary: Do not use global ordinals strategy if the leaf reader context cannot be
+ obtained
+area: Machine Learning
+type: bug
+issues: []
diff --git a/docs/internal/DistributedArchitectureGuide.md b/docs/internal/DistributedArchitectureGuide.md
index b8fb92b1ea15d..59305c6305737 100644
--- a/docs/internal/DistributedArchitectureGuide.md
+++ b/docs/internal/DistributedArchitectureGuide.md
@@ -10,70 +10,7 @@
### ActionListener
-Callbacks are used extensively throughout Elasticsearch because they enable us to write asynchronous and nonblocking code, i.e. code which
-doesn't necessarily compute a result straight away but also doesn't block the calling thread waiting for the result to become available.
-They support several useful control flows:
-
-- They can be completed immediately on the calling thread.
-- They can be completed concurrently on a different thread.
-- They can be stored in a data structure and completed later on when the system reaches a particular state.
-- Most commonly, they can be passed on to other methods that themselves require a callback.
-- They can be wrapped in another callback which modifies the behaviour of the original callback, perhaps adding some extra code to run
- before or after completion, before passing them on.
-
-`ActionListener` is a general-purpose callback interface that is used extensively across the Elasticsearch codebase. `ActionListener` is
-used pretty much everywhere that needs to perform some asynchronous and nonblocking computation. The uniformity makes it easier to compose
-parts of the system together without needing to build adapters to convert back and forth between different kinds of callback. It also makes
-it easier to develop the skills needed to read and understand all the asynchronous code, although this definitely takes practice and is
-certainly not easy in an absolute sense. Finally, it has allowed us to build a rich library for working with `ActionListener` instances
-themselves, creating new instances out of existing ones and completing them in interesting ways. See for instance:
-
-- all the static methods on [ActionListener](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/ActionListener.java) itself
-- [`ThreadedActionListener`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java) for forking work elsewhere
-- [`RefCountingListener`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java) for running work in parallel
-- [`SubscribableListener`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java) for constructing flexible workflows
-
-Callback-based asynchronous code can easily call regular synchronous code, but synchronous code cannot run callback-based asynchronous code
-without blocking the calling thread until the callback is called back. This blocking is at best undesirable (threads are too expensive to
-waste with unnecessary blocking) and at worst outright broken (the blocking can lead to deadlock). Unfortunately this means that most of our
-code ends up having to be written with callbacks, simply because it's ultimately calling into some other code that takes a callback. The
-entry points for all Elasticsearch APIs are callback-based (e.g. REST APIs all start at
-[`org.elasticsearch.rest.BaseRestHandler#prepareRequest`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java#L158-L171),
-and transport APIs all start at
-[`org.elasticsearch.action.support.TransportAction#doExecute`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/TransportAction.java#L65))
-and the whole system fundamentally works in terms of an event loop (a `io.netty.channel.EventLoop`) which processes network events via
-callbacks.
-
-`ActionListener` is not an _ad-hoc_ invention. Formally speaking, it is our implementation of the general concept of a continuation in the
-sense of [_continuation-passing style_](https://en.wikipedia.org/wiki/Continuation-passing_style) (CPS): an extra argument to a function
-which defines how to continue the computation when the result is available. This is in contrast to _direct style_ which is the more usual
-style of calling methods that return values directly back to the caller so they can continue executing as normal. There's essentially two
-ways that computation can continue in Java (it can return a value or it can throw an exception) which is why `ActionListener` has both an
-`onResponse()` and an `onFailure()` method.
-
-CPS is strictly more expressive than direct style: direct code can be mechanically translated into continuation-passing style, but CPS also
-enables all sorts of other useful control structures such as forking work onto separate threads, possibly to be executed in parallel,
-perhaps even across multiple nodes, or possibly collecting a list of continuations all waiting for the same condition to be satisfied before
-proceeding (e.g.
-[`SubscribableListener`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java)
-amongst many others). Some languages have first-class support for continuations (e.g. the `async` and `await` primitives in C#) allowing the
-programmer to write code in direct style away from those exotic control structures, but Java does not. That's why we have to manipulate all
-the callbacks ourselves.
-
-Strictly speaking, CPS requires that a computation _only_ continues by calling the continuation. In Elasticsearch, this means that
-asynchronous methods must have `void` return type and may not throw any exceptions. This is mostly the case in our code as written today,
-and is a good guiding principle, but we don't enforce void exceptionless methods and there are some deviations from this rule. In
-particular, it's not uncommon to permit some methods to throw an exception, using things like
-[`ActionListener#run`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/ActionListener.java#L381-L390)
-(or an equivalent `try ... catch ...` block) further up the stack to handle it. Some methods also take (and may complete) an
-`ActionListener` parameter, but still return a value separately for other local synchronous work.
-
-This pattern is often used in the transport action layer with the use of the
-[ChannelActionListener](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java)
-class, which wraps a `TransportChannel` produced by the transport layer. `TransportChannel` implementations can hold a reference to a Netty
-channel with which to pass the response back to the network caller. Netty has a many-to-one association of network callers to channels, so a
-call taking a long time generally won't hog resources: it's cheap. A transport action can take hours to respond and that's alright, barring
-caller timeouts.
+See the [Javadocs for `ActionListener`](https://github.com/elastic/elasticsearch/blob/main/server/src/main/java/org/elasticsearch/action/ActionListener.java)
(TODO: add useful starter references and explanations for a range of Listener classes. Reference the Netty section.)
@@ -133,6 +70,14 @@ are only used for internode operations/communications.
### Work Queues
+### RestClient
+
+The `RestClient` is primarily used in testing, to send requests against cluster nodes in the same format as users would. There
+are also some uses of `RestClient`, via `RestClientBuilder`, in the production code. For example, remote reindex leverages the
+`RestClient` internally as the REST client to the remote Elasticsearch cluster, taking advantage of the compatibility of
+`RestClient` requests with much older Elasticsearch versions. The `RestClient` is also used externally by the `Java API Client`
+to communicate with Elasticsearch.
+
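+A minimal sketch (hypothetical host, port, and endpoint, not part of this change) of building and using the low-level
+`RestClient`:
+
+```java
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+void pingCluster() throws Exception {
+    // Build a client against a single node; tests usually obtain hosts from the test cluster.
+    try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
+        // Requests are plain HTTP verb/endpoint pairs, independent of server-side classes,
+        // which is what keeps RestClient compatible with much older Elasticsearch versions.
+        Response response = client.performRequest(new Request("GET", "/_cluster/health"));
+        assert response.getStatusLine().getStatusCode() == 200;
+    }
+}
+```
+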
# Cluster Coordination
(Sketch of important classes? Might inform more sections to add for details.)
diff --git a/docs/internal/GeneralArchitectureGuide.md b/docs/internal/GeneralArchitectureGuide.md
index f865277d07f8f..a2dadb70bf975 100644
--- a/docs/internal/GeneralArchitectureGuide.md
+++ b/docs/internal/GeneralArchitectureGuide.md
@@ -6,6 +6,66 @@
## Settings
+Elasticsearch supports [cluster-level settings][] and [index-level settings][], configurable via [node-level file settings][]
+(e.g. the `elasticsearch.yml` file), command-line arguments, and REST APIs.
+
+### Declaring a Setting
+
+[cluster-level settings]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html
+[index-level settings]: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html
+[node-level file settings]: https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
+
+The [Setting][] class is the building block for Elasticsearch server settings. Each `Setting` can take multiple [Property][]
+declarations to define its characteristics. Setting values first come from the node-local `elasticsearch.yml` file, if they
+are set there, before falling back to the default specified in the `Setting` declaration. [A setting][] with
+`Property.Dynamic` can be updated at runtime, but must be paired with a [local volatile variable like this one][] and
+registered in the `ClusterSettings` via a utility like [ClusterSettings#initializeAndWatch()][] to catch and immediately
+apply dynamic changes (see the sketch below). Note that a common bug with dynamic settings is reading the value directly from
+[Metadata#settings()][], which holds the default and dynamically updated values, but _not_ the node-local `elasticsearch.yml`
+value. The scope of a `Setting` must also be declared, such as `Property.IndexScope` for a setting that applies to indices,
+or `Property.NodeScope` for a cluster-level setting.
+
+[Setting]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/common/settings/Setting.java#L57-L80
+[Property]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/common/settings/Setting.java#L82
+[A setting]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java#L111-L117
+[local volatile variable like this one]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java#L123
+[ClusterSettings#initializeAndWatch()]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java#L145
+[Metadata#settings()]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java#L713-L715
+
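+A minimal sketch (hypothetical `my_plugin.timeout` setting, not one that exists) of declaring a dynamic, node-scoped
+setting and keeping the paired volatile variable in sync:
+
+```java
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.core.TimeValue;
+
+public static final Setting<TimeValue> MY_TIMEOUT_SETTING = Setting.timeSetting(
+    "my_plugin.timeout",               // read from elasticsearch.yml when set there
+    TimeValue.timeValueSeconds(30),    // default otherwise
+    Setting.Property.Dynamic,          // may be updated at runtime
+    Setting.Property.NodeScope         // cluster-level scope
+);
+
+private volatile TimeValue myTimeout;  // the paired local volatile variable
+
+// Typically run during construction, with the node's ClusterSettings instance:
+void register(ClusterSettings clusterSettings) {
+    clusterSettings.initializeAndWatch(MY_TIMEOUT_SETTING, value -> this.myTimeout = value);
+}
+```
+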
+[ClusterSettings][] tracks the [core Elasticsearch settings][]. Ultimately the `ClusterSettings` are loaded via the
+[SettingsModule][]. Additional settings from the various plugins are [collected during node construction][] and passed into
+the [SettingsModule constructor][]. The `Plugin` interface has a [getSettings()][] method via which each plugin can declare
+additional settings.
+
+[ClusterSettings]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java#L138
+[core Elasticsearch settings]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java#L204-L586
+[SettingsModule]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java#L54
+[collected during node construction]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/node/NodeConstruction.java#L483
+[SettingsModule constructor]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/node/NodeConstruction.java#L491-L495
+[getSettings()]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/plugins/Plugin.java#L203-L208
+
+### Dynamically updating a Setting
+
+Externally, [TransportClusterUpdateSettingsAction][] and [TransportUpdateSettingsAction][] (and the corresponding REST endpoints)
+allow users to dynamically change cluster and index settings, respectively. Internally, `AbstractScopedSettings` (the parent
+class of `ClusterSettings`) has various helper methods to track dynamic changes: it keeps a [registry of `SettingUpdater`][]
+consumer lambdas to run updates when settings are changed in the cluster state. The `ClusterApplierService` [sends setting updates][]
+through to the `AbstractScopedSettings`, invoking the consumers registered therein for each updated setting, as sketched below.
+
+[TransportClusterUpdateSettingsAction]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java#L154-L160
+[TransportUpdateSettingsAction]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java#L96-L101
+[registry of `SettingUpdater`]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java#L379-L381
+[sends setting updates]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java#L490-L494
+
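+For illustration (reusing the hypothetical setting above), a consumer can also be registered directly; it runs whenever
+the setting's value changes in an applied cluster state:
+
+```java
+// Registers a SettingUpdater under the hood; ClusterApplierService invokes it
+// for each applied cluster state in which this setting's value changed.
+clusterSettings.addSettingsUpdateConsumer(
+    MY_TIMEOUT_SETTING,
+    newValue -> logger.info("my_plugin.timeout is now {}", newValue)  // logger assumed in scope
+);
+```
+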
+Index settings are always persisted. They can only be modified on an existing index, and their values are persisted as part
+of the `IndexMetadata`. Cluster settings, however, can be either persistent or transient depending on how they are tied to
+[Metadata][] ([applied here][]). Changes to persistent cluster settings survive a full cluster restart, whereas changes made
+to transient cluster settings reset to their default values, or the `elasticsearch.yml` values, if the cluster state must
+ever be reloaded from persisted state.
+
+[Metadata]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java#L212-L213
+[applied here]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java#L2437
+
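+A short sketch (real setting keys, but an illustrative request) of how the two flavors are expressed in a cluster
+settings update:
+
+```java
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.elasticsearch.common.settings.Settings;
+
+ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
+// persistent: survives a full cluster restart
+request.persistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "primaries"));
+// transient: reset if the cluster state is ever reloaded from persisted state
+request.transientSettings(Settings.builder().put("cluster.routing.rebalance.enable", "none"));
+```
+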
## Deprecations
## Plugins
diff --git a/docs/reference/alias.asciidoc b/docs/reference/alias.asciidoc
index e5c2db65778d8..9d784f530d63c 100644
--- a/docs/reference/alias.asciidoc
+++ b/docs/reference/alias.asciidoc
@@ -358,6 +358,8 @@ POST _aliases
----
// TEST[s/^/PUT my-index-2099.05.06-000001\n/]
+NOTE: Filters are only applied when using the <>, and are not applied when <>.
+
[discrete]
[[alias-routing]]
=== Routing
diff --git a/docs/reference/esql/esql-commands.asciidoc b/docs/reference/esql/esql-commands.asciidoc
index 708127718fe38..1f07361b89aac 100644
--- a/docs/reference/esql/esql-commands.asciidoc
+++ b/docs/reference/esql/esql-commands.asciidoc
@@ -39,7 +39,7 @@ image::images/esql/processing-command.svg[A processing command changing an input
* <>
* <>
* <>
-* <>
+* experimental:[] <>
* <>
* <>
* <>
diff --git a/docs/reference/esql/esql-get-started.asciidoc b/docs/reference/esql/esql-get-started.asciidoc
index 663b2f8ecd249..b7928898a3bbb 100644
--- a/docs/reference/esql/esql-get-started.asciidoc
+++ b/docs/reference/esql/esql-get-started.asciidoc
@@ -1,12 +1,9 @@
[[esql-getting-started]]
== Getting started with {esql} queries
-
++++
<titleabbrev>Getting started</titleabbrev>
++++
-preview::["Do not use {esql} on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]
-
This guide shows how you can use {esql} to query and aggregate your data.
[TIP]
diff --git a/docs/reference/esql/esql-syntax.asciidoc b/docs/reference/esql/esql-syntax.asciidoc
index c5d56ef15fdfd..c7f741d064310 100644
--- a/docs/reference/esql/esql-syntax.asciidoc
+++ b/docs/reference/esql/esql-syntax.asciidoc
@@ -160,14 +160,15 @@ Datetime intervals and timespans can be expressed using timespan literals.
Timespan literals are a combination of a number and a qualifier. These
qualifiers are supported:
-* `millisecond`/`milliseconds`
-* `second`/`seconds`
-* `minute`/`minutes`
-* `hour`/`hours`
-* `day`/`days`
-* `week`/`weeks`
-* `month`/`months`
-* `year`/`years`
+* `millisecond`/`milliseconds`/`ms`
+* `second`/`seconds`/`sec`/`s`
+* `minute`/`minutes`/`min`
+* `hour`/`hours`/`h`
+* `day`/`days`/`d`
+* `week`/`weeks`/`w`
+* `month`/`months`/`mo`
+* `quarter`/`quarters`/`q`
+* `year`/`years`/`yr`/`y`
Timespan literals are not whitespace sensitive. These expressions are all valid:
diff --git a/docs/reference/esql/functions/coalesce.asciidoc b/docs/reference/esql/functions/coalesce.asciidoc
deleted file mode 100644
index 2d8c0f379c82e..0000000000000
--- a/docs/reference/esql/functions/coalesce.asciidoc
+++ /dev/null
@@ -1,13 +0,0 @@
-[discrete]
-[[esql-coalesce]]
-=== `COALESCE`
-
-*Syntax*
-
-[source,esql]
-----
-COALESCE(expression1 [, ..., expressionN])
-----
-include::parameters/coalesce.asciidoc[]
-include::description/coalesce.asciidoc[]
-include::examples/coalesce.asciidoc[]
diff --git a/docs/reference/esql/functions/conditional-functions-and-expressions.asciidoc b/docs/reference/esql/functions/conditional-functions-and-expressions.asciidoc
index d835a14856c03..081e3b8589dba 100644
--- a/docs/reference/esql/functions/conditional-functions-and-expressions.asciidoc
+++ b/docs/reference/esql/functions/conditional-functions-and-expressions.asciidoc
@@ -15,7 +15,7 @@ manner. {esql} supports these conditional functions:
* <>
// end::cond_list[]
-include::case.asciidoc[]
-include::coalesce.asciidoc[]
-include::greatest.asciidoc[]
-include::least.asciidoc[]
+include::layout/case.asciidoc[]
+include::layout/coalesce.asciidoc[]
+include::layout/greatest.asciidoc[]
+include::layout/least.asciidoc[]
diff --git a/docs/reference/esql/functions/date-time-functions.asciidoc b/docs/reference/esql/functions/date-time-functions.asciidoc
index 8ce26eaabe381..eceb6378426a2 100644
--- a/docs/reference/esql/functions/date-time-functions.asciidoc
+++ b/docs/reference/esql/functions/date-time-functions.asciidoc
@@ -21,4 +21,4 @@ include::layout/date_extract.asciidoc[]
include::layout/date_format.asciidoc[]
include::layout/date_parse.asciidoc[]
include::layout/date_trunc.asciidoc[]
-include::now.asciidoc[]
+include::layout/now.asciidoc[]
diff --git a/docs/reference/esql/functions/description/case.asciidoc b/docs/reference/esql/functions/description/case.asciidoc
index 5c98a7a2620d0..c3e80301fbc31 100644
--- a/docs/reference/esql/functions/description/case.asciidoc
+++ b/docs/reference/esql/functions/description/case.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Accepts pairs of conditions and values. The function returns the value that belongs to the first condition that evaluates to true.
+Accepts pairs of conditions and values. The function returns the value that belongs to the first condition that evaluates to `true`. If the number of arguments is odd, the last argument is the default value which is returned when no condition matches. If the number of arguments is even, and no condition matches, the function returns `null`.
diff --git a/docs/reference/esql/functions/description/date_diff.asciidoc b/docs/reference/esql/functions/description/date_diff.asciidoc
index 3dd19b5885902..dbc03d59a2bf7 100644
--- a/docs/reference/esql/functions/description/date_diff.asciidoc
+++ b/docs/reference/esql/functions/description/date_diff.asciidoc
@@ -25,3 +25,9 @@ s|abbreviations
| microsecond | microseconds, mcs
| nanosecond | nanoseconds, ns
|===
+
+Note that while there is an overlap between the function's supported units and
+{esql}'s supported time span literals, these sets are distinct and not
+interchangeable. Similarly, the supported abbreviations are conveniently shared
+with implementations of this function in other established products, and do not
+necessarily match the date-time nomenclature used by {es}.
diff --git a/docs/reference/esql/functions/description/greatest.asciidoc b/docs/reference/esql/functions/description/greatest.asciidoc
index 3c7cfd3bfb14c..ed705d0bbb59e 100644
--- a/docs/reference/esql/functions/description/greatest.asciidoc
+++ b/docs/reference/esql/functions/description/greatest.asciidoc
@@ -2,4 +2,6 @@
*Description*
-Returns the maximum value from many columns.
+Returns the maximum value from multiple columns. This is similar to <> except it is intended to run on multiple columns at once.
+
+NOTE: When run on `keyword` or `text` fields, this returns the last string in alphabetical order. When run on `boolean` columns this will return `true` if any values are `true`.
diff --git a/docs/reference/esql/functions/description/least.asciidoc b/docs/reference/esql/functions/description/least.asciidoc
index 2aeb1f85aa51a..c5daf0bc79ae0 100644
--- a/docs/reference/esql/functions/description/least.asciidoc
+++ b/docs/reference/esql/functions/description/least.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Returns the minimum value from many columns.
+Returns the minimum value from multiple columns. This is similar to <> except it is intended to run on multiple columns at once.
diff --git a/docs/reference/esql/functions/description/now.asciidoc b/docs/reference/esql/functions/description/now.asciidoc
new file mode 100644
index 0000000000000..4852c98b4980a
--- /dev/null
+++ b/docs/reference/esql/functions/description/now.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Returns current date and time.
diff --git a/docs/reference/esql/functions/description/st_contains.asciidoc b/docs/reference/esql/functions/description/st_contains.asciidoc
index 678fde7f5d98b..a2c81b9d24a10 100644
--- a/docs/reference/esql/functions/description/st_contains.asciidoc
+++ b/docs/reference/esql/functions/description/st_contains.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Returns whether the first geometry contains the second geometry.
+Returns whether the first geometry contains the second geometry. This is the inverse of the <> function.
diff --git a/docs/reference/esql/functions/description/st_disjoint.asciidoc b/docs/reference/esql/functions/description/st_disjoint.asciidoc
index 95ab02a39614a..461dd61daef7a 100644
--- a/docs/reference/esql/functions/description/st_disjoint.asciidoc
+++ b/docs/reference/esql/functions/description/st_disjoint.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Returns whether the two geometries or geometry columns are disjoint.
+Returns whether the two geometries or geometry columns are disjoint. This is the inverse of the <> function. In mathematical terms: ST_Disjoint(A, B) ⇔ A ⋂ B = ∅
diff --git a/docs/reference/esql/functions/description/st_intersects.asciidoc b/docs/reference/esql/functions/description/st_intersects.asciidoc
index b736ba29a6c8b..48fd7bdb2f338 100644
--- a/docs/reference/esql/functions/description/st_intersects.asciidoc
+++ b/docs/reference/esql/functions/description/st_intersects.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Returns whether the two geometries or geometry columns intersect.
+Returns true if two geometries intersect. They intersect if they have any point in common, including their interior points (points along lines or within polygons). This is the inverse of the <> function. In mathematical terms: ST_Intersects(A, B) ⇔ A ⋂ B ≠ ∅
diff --git a/docs/reference/esql/functions/description/st_within.asciidoc b/docs/reference/esql/functions/description/st_within.asciidoc
index 890f28cb769b0..38a34f518234a 100644
--- a/docs/reference/esql/functions/description/st_within.asciidoc
+++ b/docs/reference/esql/functions/description/st_within.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Returns whether the first geometry is within the second geometry.
+Returns whether the first geometry is within the second geometry. This is the inverse of the <> function.
diff --git a/docs/reference/esql/functions/description/st_x.asciidoc b/docs/reference/esql/functions/description/st_x.asciidoc
index beb077bea332c..33d867f862429 100644
--- a/docs/reference/esql/functions/description/st_x.asciidoc
+++ b/docs/reference/esql/functions/description/st_x.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Extracts the x-coordinate from a point geometry.
+Extracts the `x` coordinate from the supplied point. If the point is of type `geo_point`, this is equivalent to extracting the `longitude` value.
diff --git a/docs/reference/esql/functions/description/st_y.asciidoc b/docs/reference/esql/functions/description/st_y.asciidoc
index 19c371d2ef931..b03956a51e1a6 100644
--- a/docs/reference/esql/functions/description/st_y.asciidoc
+++ b/docs/reference/esql/functions/description/st_y.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Extracts the y-coordinate from a point geometry.
+Extracts the `y` coordinate from the supplied point. If the point is of type `geo_point`, this is equivalent to extracting the `latitude` value.
diff --git a/docs/reference/esql/functions/examples/case.asciidoc b/docs/reference/esql/functions/examples/case.asciidoc
new file mode 100644
index 0000000000000..c5c766512ce0b
--- /dev/null
+++ b/docs/reference/esql/functions/examples/case.asciidoc
@@ -0,0 +1,32 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Examples*
+
+Determine whether employees are monolingual, bilingual, or polyglot:
+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=case]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=case-result]
+|===
+Calculate the total connection success rate based on log messages:
+[source.merge.styled,esql]
+----
+include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate-result]
+|===
+Calculate an hourly error rate as a percentage of the total number of log messages:
+[source.merge.styled,esql]
+----
+include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/greatest.asciidoc b/docs/reference/esql/functions/examples/greatest.asciidoc
new file mode 100644
index 0000000000000..bd89ad1b3cdd1
--- /dev/null
+++ b/docs/reference/esql/functions/examples/greatest.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/math.csv-spec[tag=greatest]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/math.csv-spec[tag=greatest-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/least.asciidoc b/docs/reference/esql/functions/examples/least.asciidoc
new file mode 100644
index 0000000000000..67fc5260f6391
--- /dev/null
+++ b/docs/reference/esql/functions/examples/least.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/math.csv-spec[tag=least]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/math.csv-spec[tag=least-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/now.asciidoc b/docs/reference/esql/functions/examples/now.asciidoc
new file mode 100644
index 0000000000000..b8953de93724c
--- /dev/null
+++ b/docs/reference/esql/functions/examples/now.asciidoc
@@ -0,0 +1,22 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Examples*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/date.csv-spec[tag=docsNow]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/date.csv-spec[tag=docsNow-result]
+|===
+To retrieve logs from the last hour:
+[source.merge.styled,esql]
+----
+include::{esql-specs}/date.csv-spec[tag=docsNowWhere]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/date.csv-spec[tag=docsNowWhere-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/st_x.asciidoc b/docs/reference/esql/functions/examples/st_x.asciidoc
new file mode 100644
index 0000000000000..895e76c6c04e2
--- /dev/null
+++ b/docs/reference/esql/functions/examples/st_x.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/spatial.csv-spec[tag=st_x_y]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/spatial.csv-spec[tag=st_x_y-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/st_y.asciidoc b/docs/reference/esql/functions/examples/st_y.asciidoc
new file mode 100644
index 0000000000000..895e76c6c04e2
--- /dev/null
+++ b/docs/reference/esql/functions/examples/st_y.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/spatial.csv-spec[tag=st_x_y]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/spatial.csv-spec[tag=st_x_y-result]
+|===
+
diff --git a/docs/reference/esql/functions/greatest.asciidoc b/docs/reference/esql/functions/greatest.asciidoc
deleted file mode 100644
index 003f1f46e6db5..0000000000000
--- a/docs/reference/esql/functions/greatest.asciidoc
+++ /dev/null
@@ -1,38 +0,0 @@
-[discrete]
-[[esql-greatest]]
-=== `GREATEST`
-
-*Syntax*
-
-[.text-center]
-image::esql/functions/signature/greatest.svg[Embedded,opts=inline]
-
-*Parameters*
-
-`first`::
-First of the columns to evaluate.
-
-`rest`::
-The rest of the columns to evaluate.
-
-*Description*
-
-Returns the maximum value from multiple columns. This is similar to <>
-except it is intended to run on multiple columns at once.
-
-NOTE: When run on `keyword` or `text` fields, this returns the last string
- in alphabetical order. When run on `boolean` columns this will return
- `true` if any values are `true`.
-
-include::types/greatest.asciidoc[]
-
-*Example*
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/math.csv-spec[tag=greatest]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/math.csv-spec[tag=greatest-result]
-|===
diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json
index 73bc215ac6ade..5959eed62d37b 100644
--- a/docs/reference/esql/functions/kibana/definition/case.json
+++ b/docs/reference/esql/functions/kibana/definition/case.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "case",
- "description" : "Accepts pairs of conditions and values.\nThe function returns the value that belongs to the first condition that evaluates to true.",
+ "description" : "Accepts pairs of conditions and values. The function returns the value that\nbelongs to the first condition that evaluates to `true`.\n\nIf the number of arguments is odd, the last argument is the default value which\nis returned when no condition matches. If the number of arguments is even, and\nno condition matches, the function returns `null`.",
"signatures" : [
{
"params" : [
@@ -10,23 +10,226 @@
"name" : "condition",
"type" : "boolean",
"optional" : false,
- "description" : ""
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "cartesian_point",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "cartesian_point"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "datetime",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "datetime"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "double",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "geo_point",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "geo_point"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "integer"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "ip"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
},
{
"name" : "trueValue",
"type" : "keyword",
"optional" : false,
- "description" : ""
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
},
{
"name" : "falseValue",
"type" : "keyword",
"optional" : true,
- "description" : ""
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
}
],
"variadic" : true,
"returnType" : "keyword"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "long",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "text",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "text"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "unsigned_long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "condition",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A condition."
+ },
+ {
+ "name" : "trueValue",
+ "type" : "version",
+ "optional" : false,
+ "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "version"
}
+ ],
+ "examples" : [
+ "FROM employees\n| EVAL type = CASE(\n languages <= 1, \"monolingual\",\n languages <= 2, \"bilingual\",\n \"polyglot\")\n| KEEP emp_no, languages, type",
+ "FROM sample_data\n| EVAL successful = CASE(\n STARTS_WITH(message, \"Connected to\"), 1,\n message == \"Connection error\", 0\n )\n| STATS success_rate = AVG(successful)",
+ "FROM sample_data\n| EVAL error = CASE(message LIKE \"*error*\", 1, 0)\n| EVAL hour = DATE_TRUNC(1 hour, @timestamp)\n| STATS error_rate = AVG(error) by hour\n| SORT hour"
]
}
diff --git a/docs/reference/esql/functions/kibana/definition/coalesce.json b/docs/reference/esql/functions/kibana/definition/coalesce.json
index 87feead06d091..1081b42839577 100644
--- a/docs/reference/esql/functions/kibana/definition/coalesce.json
+++ b/docs/reference/esql/functions/kibana/definition/coalesce.json
@@ -10,7 +10,7 @@
"name" : "first",
"type" : "boolean",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
}
],
"variadic" : true,
@@ -22,13 +22,13 @@
"name" : "first",
"type" : "boolean",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
},
{
"name" : "rest",
"type" : "boolean",
"optional" : true,
- "description" : "Other expression to evaluate"
+ "description" : "Other expression to evaluate."
}
],
"variadic" : true,
@@ -40,7 +40,7 @@
"name" : "first",
"type" : "integer",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
}
],
"variadic" : true,
@@ -52,13 +52,13 @@
"name" : "first",
"type" : "integer",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
},
{
"name" : "rest",
"type" : "integer",
"optional" : true,
- "description" : "Other expression to evaluate"
+ "description" : "Other expression to evaluate."
}
],
"variadic" : true,
@@ -70,7 +70,7 @@
"name" : "first",
"type" : "keyword",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
}
],
"variadic" : true,
@@ -82,13 +82,13 @@
"name" : "first",
"type" : "keyword",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
},
{
"name" : "rest",
"type" : "keyword",
"optional" : true,
- "description" : "Other expression to evaluate"
+ "description" : "Other expression to evaluate."
}
],
"variadic" : true,
@@ -100,7 +100,7 @@
"name" : "first",
"type" : "long",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
}
],
"variadic" : true,
@@ -112,13 +112,13 @@
"name" : "first",
"type" : "long",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
},
{
"name" : "rest",
"type" : "long",
"optional" : true,
- "description" : "Other expression to evaluate"
+ "description" : "Other expression to evaluate."
}
],
"variadic" : true,
@@ -130,7 +130,7 @@
"name" : "first",
"type" : "text",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
}
],
"variadic" : true,
@@ -142,13 +142,13 @@
"name" : "first",
"type" : "text",
"optional" : false,
- "description" : "Expression to evaluate"
+ "description" : "Expression to evaluate."
},
{
"name" : "rest",
"type" : "text",
"optional" : true,
- "description" : "Other expression to evaluate"
+ "description" : "Other expression to evaluate."
}
],
"variadic" : true,
diff --git a/docs/reference/esql/functions/kibana/definition/greatest.json b/docs/reference/esql/functions/kibana/definition/greatest.json
index f72f54708c6b1..15c9f58d32d3e 100644
--- a/docs/reference/esql/functions/kibana/definition/greatest.json
+++ b/docs/reference/esql/functions/kibana/definition/greatest.json
@@ -2,7 +2,8 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "greatest",
- "description" : "Returns the maximum value from many columns.",
+ "description" : "Returns the maximum value from multiple columns. This is similar to <>\nexcept it is intended to run on multiple columns at once.",
+ "note" : "When run on `keyword` or `text` fields, this returns the last string in alphabetical order. When run on `boolean` columns this will return `true` if any values are `true`.",
"signatures" : [
{
"params" : [
@@ -10,7 +11,7 @@
"name" : "first",
"type" : "boolean",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -22,13 +23,13 @@
"name" : "first",
"type" : "boolean",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "boolean",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -40,13 +41,13 @@
"name" : "first",
"type" : "double",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "double",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -58,7 +59,7 @@
"name" : "first",
"type" : "integer",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -70,13 +71,13 @@
"name" : "first",
"type" : "integer",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "integer",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -88,13 +89,13 @@
"name" : "first",
"type" : "ip",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "ip",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -106,7 +107,7 @@
"name" : "first",
"type" : "keyword",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -118,13 +119,13 @@
"name" : "first",
"type" : "keyword",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "keyword",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -136,7 +137,7 @@
"name" : "first",
"type" : "long",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -148,13 +149,13 @@
"name" : "first",
"type" : "long",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "long",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -166,7 +167,7 @@
"name" : "first",
"type" : "text",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -178,13 +179,13 @@
"name" : "first",
"type" : "text",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "text",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -196,17 +197,20 @@
"name" : "first",
"type" : "version",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "version",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
"returnType" : "version"
}
+ ],
+ "examples" : [
+ "ROW a = 10, b = 20\n| EVAL g = GREATEST(a, b)"
]
}
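The `note` added to greatest.json above states the `keyword`/`boolean` behavior without showing it; here is a minimal ES|QL sketch (not part of the generated files; the literal values are illustrative) of that behavior:

```
ROW s1 = "apple", s2 = "banana", b1 = false, b2 = true
| EVAL s = GREATEST(s1, s2) // "banana", the last string in alphabetical order
| EVAL b = GREATEST(b1, b2) // true, because at least one value is true
```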
diff --git a/docs/reference/esql/functions/kibana/definition/least.json b/docs/reference/esql/functions/kibana/definition/least.json
index 66efedc0c9fe5..0b922ad6ad3c2 100644
--- a/docs/reference/esql/functions/kibana/definition/least.json
+++ b/docs/reference/esql/functions/kibana/definition/least.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "least",
- "description" : "Returns the minimum value from many columns.",
+ "description" : "Returns the minimum value from multiple columns. This is similar to <> except it is intended to run on multiple columns at once.",
"signatures" : [
{
"params" : [
@@ -10,7 +10,7 @@
"name" : "first",
"type" : "boolean",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -22,13 +22,13 @@
"name" : "first",
"type" : "boolean",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "boolean",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -40,13 +40,13 @@
"name" : "first",
"type" : "double",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "double",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -58,7 +58,7 @@
"name" : "first",
"type" : "integer",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -70,13 +70,13 @@
"name" : "first",
"type" : "integer",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "integer",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -88,13 +88,13 @@
"name" : "first",
"type" : "ip",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "ip",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -106,7 +106,7 @@
"name" : "first",
"type" : "keyword",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -118,13 +118,13 @@
"name" : "first",
"type" : "keyword",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "keyword",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -136,7 +136,7 @@
"name" : "first",
"type" : "long",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -148,13 +148,13 @@
"name" : "first",
"type" : "long",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "long",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -166,7 +166,7 @@
"name" : "first",
"type" : "text",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
}
],
"variadic" : true,
@@ -178,13 +178,13 @@
"name" : "first",
"type" : "text",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "text",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
@@ -196,17 +196,20 @@
"name" : "first",
"type" : "version",
"optional" : false,
- "description" : ""
+ "description" : "First of the columns to evaluate."
},
{
"name" : "rest",
"type" : "version",
"optional" : true,
- "description" : ""
+ "description" : "The rest of the columns to evaluate."
}
],
"variadic" : true,
"returnType" : "version"
}
+ ],
+ "examples" : [
+ "ROW a = 10, b = 20\n| EVAL l = LEAST(a, b)"
]
}
diff --git a/docs/reference/esql/functions/kibana/definition/now.json b/docs/reference/esql/functions/kibana/definition/now.json
new file mode 100644
index 0000000000000..9cdb4945afa2e
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/now.json
@@ -0,0 +1,16 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "eval",
+ "name" : "now",
+ "description" : "Returns current date and time.",
+ "signatures" : [
+ {
+ "params" : [ ],
+ "returnType" : "datetime"
+ }
+ ],
+ "examples" : [
+ "ROW current_date = NOW()",
+ "FROM sample_data\n| WHERE @timestamp > NOW() - 1 hour"
+ ]
+}
diff --git a/docs/reference/esql/functions/kibana/definition/st_contains.json b/docs/reference/esql/functions/kibana/definition/st_contains.json
index f4f8003917908..1ef76e46f371a 100644
--- a/docs/reference/esql/functions/kibana/definition/st_contains.json
+++ b/docs/reference/esql/functions/kibana/definition/st_contains.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "st_contains",
- "description" : "Returns whether the first geometry contains the second geometry.",
+ "description" : "Returns whether the first geometry contains the second geometry.\nThis is the inverse of the <> function.",
"signatures" : [
{
"params" : [
@@ -10,13 +10,13 @@
"name" : "geomA",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -28,13 +28,13 @@
"name" : "geomA",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -46,13 +46,13 @@
"name" : "geomA",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -64,13 +64,13 @@
"name" : "geomA",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -82,13 +82,13 @@
"name" : "geomA",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -100,13 +100,13 @@
"name" : "geomA",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -118,13 +118,13 @@
"name" : "geomA",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -136,13 +136,13 @@
"name" : "geomA",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
diff --git a/docs/reference/esql/functions/kibana/definition/st_disjoint.json b/docs/reference/esql/functions/kibana/definition/st_disjoint.json
index 98647b63ff18f..e408a0f98fe6c 100644
--- a/docs/reference/esql/functions/kibana/definition/st_disjoint.json
+++ b/docs/reference/esql/functions/kibana/definition/st_disjoint.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "st_disjoint",
- "description" : "Returns whether the two geometries or geometry columns are disjoint.",
+ "description" : "Returns whether the two geometries or geometry columns are disjoint.\nThis is the inverse of the <> function.\nIn mathematical terms: ST_Disjoint(A, B) ⇔ A ⋂ B = ∅",
"signatures" : [
{
"params" : [
@@ -10,13 +10,13 @@
"name" : "geomA",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -28,13 +28,13 @@
"name" : "geomA",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -46,13 +46,13 @@
"name" : "geomA",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -64,13 +64,13 @@
"name" : "geomA",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -82,13 +82,13 @@
"name" : "geomA",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -100,13 +100,13 @@
"name" : "geomA",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -118,13 +118,13 @@
"name" : "geomA",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -136,13 +136,13 @@
"name" : "geomA",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
diff --git a/docs/reference/esql/functions/kibana/definition/st_intersects.json b/docs/reference/esql/functions/kibana/definition/st_intersects.json
index ba619fe57ecf5..2f9f255ab1870 100644
--- a/docs/reference/esql/functions/kibana/definition/st_intersects.json
+++ b/docs/reference/esql/functions/kibana/definition/st_intersects.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "st_intersects",
- "description" : "Returns whether the two geometries or geometry columns intersect.",
+ "description" : "Returns true if two geometries intersect.\nThey intersect if they have any point in common, including their interior points\n(points along lines or within polygons).\nThis is the inverse of the <> function.\nIn mathematical terms: ST_Intersects(A, B) ⇔ A ⋂ B ≠ ∅",
"signatures" : [
{
"params" : [
@@ -10,13 +10,13 @@
"name" : "geomA",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -28,13 +28,13 @@
"name" : "geomA",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -46,13 +46,13 @@
"name" : "geomA",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -64,13 +64,13 @@
"name" : "geomA",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -82,13 +82,13 @@
"name" : "geomA",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -100,13 +100,13 @@
"name" : "geomA",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -118,13 +118,13 @@
"name" : "geomA",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -136,13 +136,13 @@
"name" : "geomA",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
diff --git a/docs/reference/esql/functions/kibana/definition/st_within.json b/docs/reference/esql/functions/kibana/definition/st_within.json
index ee98337441ab7..e0cdf62fe0f98 100644
--- a/docs/reference/esql/functions/kibana/definition/st_within.json
+++ b/docs/reference/esql/functions/kibana/definition/st_within.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "st_within",
- "description" : "Returns whether the first geometry is within the second geometry.",
+ "description" : "Returns whether the first geometry is within the second geometry.\nThis is the inverse of the <> function.",
"signatures" : [
{
"params" : [
@@ -10,13 +10,13 @@
"name" : "geomA",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -28,13 +28,13 @@
"name" : "geomA",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -46,13 +46,13 @@
"name" : "geomA",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -64,13 +64,13 @@
"name" : "geomA",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "cartesian_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -82,13 +82,13 @@
"name" : "geomA",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -100,13 +100,13 @@
"name" : "geomA",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -118,13 +118,13 @@
"name" : "geomA",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_point",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
@@ -136,13 +136,13 @@
"name" : "geomA",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`."
},
{
"name" : "geomB",
"type" : "geo_shape",
"optional" : false,
- "description" : "Geometry column name or variable of geometry type"
+ "description" : "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters."
}
],
"variadic" : false,
diff --git a/docs/reference/esql/functions/kibana/definition/st_x.json b/docs/reference/esql/functions/kibana/definition/st_x.json
index 57598b3470e11..c3554a2ee808b 100644
--- a/docs/reference/esql/functions/kibana/definition/st_x.json
+++ b/docs/reference/esql/functions/kibana/definition/st_x.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "st_x",
- "description" : "Extracts the x-coordinate from a point geometry.",
+ "description" : "Extracts the `x` coordinate from the supplied point.\nIf the points is of type `geo_point` this is equivalent to extracting the `longitude` value.",
"signatures" : [
{
"params" : [
@@ -10,7 +10,7 @@
"name" : "point",
"type" : "cartesian_point",
"optional" : false,
- "description" : ""
+ "description" : "Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`."
}
],
"variadic" : false,
@@ -22,11 +22,14 @@
"name" : "point",
"type" : "geo_point",
"optional" : false,
- "description" : ""
+ "description" : "Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`."
}
],
"variadic" : false,
"returnType" : "double"
}
+ ],
+ "examples" : [
+ "ROW point = TO_GEOPOINT(\"POINT(42.97109629958868 14.7552534006536)\")\n| EVAL x = ST_X(point), y = ST_Y(point)"
]
}
diff --git a/docs/reference/esql/functions/kibana/definition/st_y.json b/docs/reference/esql/functions/kibana/definition/st_y.json
index 0dacaa56bb8de..2966ae04f75e4 100644
--- a/docs/reference/esql/functions/kibana/definition/st_y.json
+++ b/docs/reference/esql/functions/kibana/definition/st_y.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "st_y",
- "description" : "Extracts the y-coordinate from a point geometry.",
+ "description" : "Extracts the `y` coordinate from the supplied point.\nIf the points is of type `geo_point` this is equivalent to extracting the `latitude` value.",
"signatures" : [
{
"params" : [
@@ -10,7 +10,7 @@
"name" : "point",
"type" : "cartesian_point",
"optional" : false,
- "description" : ""
+ "description" : "Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`."
}
],
"variadic" : false,
@@ -22,11 +22,14 @@
"name" : "point",
"type" : "geo_point",
"optional" : false,
- "description" : ""
+ "description" : "Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`."
}
],
"variadic" : false,
"returnType" : "double"
}
+ ],
+ "examples" : [
+ "ROW point = TO_GEOPOINT(\"POINT(42.97109629958868 14.7552534006536)\")\n| EVAL x = ST_X(point), y = ST_Y(point)"
]
}
diff --git a/docs/reference/esql/functions/kibana/docs/case.md b/docs/reference/esql/functions/kibana/docs/case.md
index e1494a5c2af8c..8bb31ee972759 100644
--- a/docs/reference/esql/functions/kibana/docs/case.md
+++ b/docs/reference/esql/functions/kibana/docs/case.md
@@ -3,6 +3,18 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### CASE
-Accepts pairs of conditions and values.
-The function returns the value that belongs to the first condition that evaluates to true.
+Accepts pairs of conditions and values. The function returns the value that
+belongs to the first condition that evaluates to `true`.
+If the number of arguments is odd, the last argument is the default value which
+is returned when no condition matches. If the number of arguments is even, and
+no condition matches, the function returns `null`.
+
+```
+FROM employees
+| EVAL type = CASE(
+ languages <= 1, "monolingual",
+ languages <= 2, "bilingual",
+ "polyglot")
+| KEEP emp_no, languages, type
+```
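The odd/even argument rule described above can be shown with a variant of the same example; this sketch (not in the generated file) drops the default value so that unmatched rows get `null`:

```
FROM employees
| EVAL type = CASE(
    languages <= 1, "monolingual",
    languages <= 2, "bilingual")
| KEEP emp_no, languages, type
// even number of arguments: rows with languages > 2 get type = null
```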
diff --git a/docs/reference/esql/functions/kibana/docs/greatest.md b/docs/reference/esql/functions/kibana/docs/greatest.md
index 3db0c9ed87aa5..4b3b4027381f8 100644
--- a/docs/reference/esql/functions/kibana/docs/greatest.md
+++ b/docs/reference/esql/functions/kibana/docs/greatest.md
@@ -3,5 +3,11 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### GREATEST
-Returns the maximum value from many columns.
+Returns the maximum value from multiple columns. This is similar to <<esql-mv_max>>
+except it is intended to run on multiple columns at once.
+```
+ROW a = 10, b = 20
+| EVAL g = GREATEST(a, b)
+```
+Note: When run on `keyword` or `text` fields, this returns the last string in alphabetical order. When run on `boolean` columns this will return `true` if any values are `true`.
diff --git a/docs/reference/esql/functions/kibana/docs/least.md b/docs/reference/esql/functions/kibana/docs/least.md
index ff2c19592c8e1..7bbbcf79bc374 100644
--- a/docs/reference/esql/functions/kibana/docs/least.md
+++ b/docs/reference/esql/functions/kibana/docs/least.md
@@ -3,5 +3,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### LEAST
-Returns the minimum value from many columns.
+Returns the minimum value from multiple columns. This is similar to <<esql-mv_min>> except it is intended to run on multiple columns at once.
+```
+ROW a = 10, b = 20
+| EVAL l = LEAST(a, b)
+```
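For symmetry with the GREATEST note, a minimal sketch (illustrative values, not part of the generated file) of LEAST on `keyword` and `boolean` inputs, assuming the semantics stated in the removed least.asciidoc later in this diff (first string in alphabetical order; `false` if any value is `false`):

```
ROW s1 = "apple", s2 = "banana", b1 = false, b2 = true
| EVAL s = LEAST(s1, s2) // "apple", the first string in alphabetical order
| EVAL b = LEAST(b1, b2) // false, because at least one value is false
```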
diff --git a/docs/reference/esql/functions/kibana/docs/now.md b/docs/reference/esql/functions/kibana/docs/now.md
new file mode 100644
index 0000000000000..5143dc843ebd8
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/docs/now.md
@@ -0,0 +1,10 @@
+<!--
+This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+-->
+
+### NOW
+Returns current date and time.
+
+```
+ROW current_date = NOW()
+```
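The second example from the new now.json above, filtering to the last hour, applies here as well; shown as a sketch (`sample_data` is the index name used in the generated definition):

```
FROM sample_data
| WHERE @timestamp > NOW() - 1 hour
```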
diff --git a/docs/reference/esql/functions/kibana/docs/st_contains.md b/docs/reference/esql/functions/kibana/docs/st_contains.md
index 6e23bb9b0f116..99f3a19f9df41 100644
--- a/docs/reference/esql/functions/kibana/docs/st_contains.md
+++ b/docs/reference/esql/functions/kibana/docs/st_contains.md
@@ -4,6 +4,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
### ST_CONTAINS
Returns whether the first geometry contains the second geometry.
+This is the inverse of the <<esql-st_within,ST_WITHIN>> function.
```
FROM airport_city_boundaries
diff --git a/docs/reference/esql/functions/kibana/docs/st_disjoint.md b/docs/reference/esql/functions/kibana/docs/st_disjoint.md
index 7cf66b168bd70..4b42954efa5c1 100644
--- a/docs/reference/esql/functions/kibana/docs/st_disjoint.md
+++ b/docs/reference/esql/functions/kibana/docs/st_disjoint.md
@@ -4,6 +4,8 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
### ST_DISJOINT
Returns whether the two geometries or geometry columns are disjoint.
+This is the inverse of the <<esql-st_intersects,ST_INTERSECTS>> function.
+In mathematical terms: ST_Disjoint(A, B) ⇔ A ⋂ B = ∅
```
FROM airport_city_boundaries
diff --git a/docs/reference/esql/functions/kibana/docs/st_intersects.md b/docs/reference/esql/functions/kibana/docs/st_intersects.md
index e4db33429dbe3..b0a58b3ab2357 100644
--- a/docs/reference/esql/functions/kibana/docs/st_intersects.md
+++ b/docs/reference/esql/functions/kibana/docs/st_intersects.md
@@ -3,7 +3,11 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### ST_INTERSECTS
-Returns whether the two geometries or geometry columns intersect.
+Returns true if two geometries intersect.
+They intersect if they have any point in common, including their interior points
+(points along lines or within polygons).
+This is the inverse of the <<esql-st_disjoint,ST_DISJOINT>> function.
+In mathematical terms: ST_Intersects(A, B) ⇔ A ⋂ B ≠ ∅
```
FROM airports
diff --git a/docs/reference/esql/functions/kibana/docs/st_within.md b/docs/reference/esql/functions/kibana/docs/st_within.md
index cbb3ae5ee9aca..9ef046e5006f6 100644
--- a/docs/reference/esql/functions/kibana/docs/st_within.md
+++ b/docs/reference/esql/functions/kibana/docs/st_within.md
@@ -4,6 +4,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
### ST_WITHIN
Returns whether the first geometry is within the second geometry.
+This is the inverse of the <<esql-st_contains,ST_CONTAINS>> function.
```
FROM airport_city_boundaries
diff --git a/docs/reference/esql/functions/kibana/docs/st_x.md b/docs/reference/esql/functions/kibana/docs/st_x.md
index af2f4de1487cd..b113f19e1c76c 100644
--- a/docs/reference/esql/functions/kibana/docs/st_x.md
+++ b/docs/reference/esql/functions/kibana/docs/st_x.md
@@ -3,5 +3,10 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### ST_X
-Extracts the x-coordinate from a point geometry.
+Extracts the `x` coordinate from the supplied point.
+If the point is of type `geo_point`, this is equivalent to extracting the `longitude` value.
+```
+ROW point = TO_GEOPOINT("POINT(42.97109629958868 14.7552534006536)")
+| EVAL x = ST_X(point), y = ST_Y(point)
+```
diff --git a/docs/reference/esql/functions/kibana/docs/st_y.md b/docs/reference/esql/functions/kibana/docs/st_y.md
index 575a5bd3c7d33..db88c3ada63bb 100644
--- a/docs/reference/esql/functions/kibana/docs/st_y.md
+++ b/docs/reference/esql/functions/kibana/docs/st_y.md
@@ -3,5 +3,10 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### ST_Y
-Extracts the y-coordinate from a point geometry.
+Extracts the `y` coordinate from the supplied point.
+If the point is of type `geo_point`, this is equivalent to extracting the `latitude` value.
+```
+ROW point = TO_GEOPOINT("POINT(42.97109629958868 14.7552534006536)")
+| EVAL x = ST_X(point), y = ST_Y(point)
+```
diff --git a/docs/reference/esql/functions/layout/case.asciidoc b/docs/reference/esql/functions/layout/case.asciidoc
index 192e74522b8d3..edfc768dc7055 100644
--- a/docs/reference/esql/functions/layout/case.asciidoc
+++ b/docs/reference/esql/functions/layout/case.asciidoc
@@ -12,3 +12,4 @@ image::esql/functions/signature/case.svg[Embedded,opts=inline]
include::../parameters/case.asciidoc[]
include::../description/case.asciidoc[]
include::../types/case.asciidoc[]
+include::../examples/case.asciidoc[]
diff --git a/docs/reference/esql/functions/layout/greatest.asciidoc b/docs/reference/esql/functions/layout/greatest.asciidoc
index 1ff17f3c3adfe..fff9a32412947 100644
--- a/docs/reference/esql/functions/layout/greatest.asciidoc
+++ b/docs/reference/esql/functions/layout/greatest.asciidoc
@@ -12,3 +12,4 @@ image::esql/functions/signature/greatest.svg[Embedded,opts=inline]
include::../parameters/greatest.asciidoc[]
include::../description/greatest.asciidoc[]
include::../types/greatest.asciidoc[]
+include::../examples/greatest.asciidoc[]
diff --git a/docs/reference/esql/functions/layout/least.asciidoc b/docs/reference/esql/functions/layout/least.asciidoc
index a14a166c8bfe4..0daee9c181a65 100644
--- a/docs/reference/esql/functions/layout/least.asciidoc
+++ b/docs/reference/esql/functions/layout/least.asciidoc
@@ -12,3 +12,4 @@ image::esql/functions/signature/least.svg[Embedded,opts=inline]
include::../parameters/least.asciidoc[]
include::../description/least.asciidoc[]
include::../types/least.asciidoc[]
+include::../examples/least.asciidoc[]
diff --git a/docs/reference/esql/functions/layout/now.asciidoc b/docs/reference/esql/functions/layout/now.asciidoc
new file mode 100644
index 0000000000000..52341c1665619
--- /dev/null
+++ b/docs/reference/esql/functions/layout/now.asciidoc
@@ -0,0 +1,15 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+[discrete]
+[[esql-now]]
+=== `NOW`
+
+*Syntax*
+
+[.text-center]
+image::esql/functions/signature/now.svg[Embedded,opts=inline]
+
+include::../parameters/now.asciidoc[]
+include::../description/now.asciidoc[]
+include::../types/now.asciidoc[]
+include::../examples/now.asciidoc[]
diff --git a/docs/reference/esql/functions/layout/st_x.asciidoc b/docs/reference/esql/functions/layout/st_x.asciidoc
index ce3824aa157b1..2c2dc191a31a4 100644
--- a/docs/reference/esql/functions/layout/st_x.asciidoc
+++ b/docs/reference/esql/functions/layout/st_x.asciidoc
@@ -12,3 +12,4 @@ image::esql/functions/signature/st_x.svg[Embedded,opts=inline]
include::../parameters/st_x.asciidoc[]
include::../description/st_x.asciidoc[]
include::../types/st_x.asciidoc[]
+include::../examples/st_x.asciidoc[]
diff --git a/docs/reference/esql/functions/layout/st_y.asciidoc b/docs/reference/esql/functions/layout/st_y.asciidoc
index 702e9097ae689..0708465760bb3 100644
--- a/docs/reference/esql/functions/layout/st_y.asciidoc
+++ b/docs/reference/esql/functions/layout/st_y.asciidoc
@@ -12,3 +12,4 @@ image::esql/functions/signature/st_y.svg[Embedded,opts=inline]
include::../parameters/st_y.asciidoc[]
include::../description/st_y.asciidoc[]
include::../types/st_y.asciidoc[]
+include::../examples/st_y.asciidoc[]
diff --git a/docs/reference/esql/functions/least.asciidoc b/docs/reference/esql/functions/least.asciidoc
deleted file mode 100644
index 2860eb31090c4..0000000000000
--- a/docs/reference/esql/functions/least.asciidoc
+++ /dev/null
@@ -1,38 +0,0 @@
-[discrete]
-[[esql-least]]
-=== `LEAST`
-
-*Syntax*
-
-[.text-center]
-image::esql/functions/signature/least.svg[Embedded,opts=inline]
-
-*Parameters*
-
-`first`::
-First of the columns to evaluate.
-
-`rest`::
-The rest of the columns to evaluate.
-
-*Description*
-
-Returns the minimum value from multiple columns. This is similar to
-<<esql-mv_min>> except it is intended to run on multiple columns at once.
-
-NOTE: When run on `keyword` or `text` fields, this returns the first string
- in alphabetical order. When run on `boolean` columns this will return
- `false` if any values are `false`.
-
-include::types/least.asciidoc[]
-
-*Example*
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/math.csv-spec[tag=least]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/math.csv-spec[tag=least-result]
-|===
diff --git a/docs/reference/esql/functions/now.asciidoc b/docs/reference/esql/functions/now.asciidoc
deleted file mode 100644
index 3c46f557acd1f..0000000000000
--- a/docs/reference/esql/functions/now.asciidoc
+++ /dev/null
@@ -1,28 +0,0 @@
-[discrete]
-[[esql-now]]
-=== `NOW`
-
-*Syntax*
-
-[source,esql]
-----
-NOW()
-----
-
-*Description*
-
-Returns current date and time.
-
-*Example*
-
-[source,esql]
-----
-include::{esql-specs}/date.csv-spec[tag=docsNow]
-----
-
-To retrieve logs from the last hour:
-
-[source,esql]
-----
-include::{esql-specs}/date.csv-spec[tag=docsNowWhere]
-----
\ No newline at end of file
diff --git a/docs/reference/esql/functions/parameters/case.asciidoc b/docs/reference/esql/functions/parameters/case.asciidoc
index c3617b7c0e32c..ee6f7e499b3b3 100644
--- a/docs/reference/esql/functions/parameters/case.asciidoc
+++ b/docs/reference/esql/functions/parameters/case.asciidoc
@@ -3,7 +3,7 @@
*Parameters*
`condition`::
-
+A condition.
`trueValue`::
-
+The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches.
diff --git a/docs/reference/esql/functions/parameters/coalesce.asciidoc b/docs/reference/esql/functions/parameters/coalesce.asciidoc
index 9b62a2e7e0d87..e0860c5bc3030 100644
--- a/docs/reference/esql/functions/parameters/coalesce.asciidoc
+++ b/docs/reference/esql/functions/parameters/coalesce.asciidoc
@@ -3,7 +3,7 @@
*Parameters*
`first`::
-Expression to evaluate
+Expression to evaluate.
`rest`::
-Other expression to evaluate
+Other expression to evaluate.
diff --git a/docs/reference/esql/functions/parameters/greatest.asciidoc b/docs/reference/esql/functions/parameters/greatest.asciidoc
index 83ac29d0bf7c9..8d23101aba7f3 100644
--- a/docs/reference/esql/functions/parameters/greatest.asciidoc
+++ b/docs/reference/esql/functions/parameters/greatest.asciidoc
@@ -3,7 +3,7 @@
*Parameters*
`first`::
-
+First of the columns to evaluate.
`rest`::
-
+The rest of the columns to evaluate.
diff --git a/docs/reference/esql/functions/parameters/least.asciidoc b/docs/reference/esql/functions/parameters/least.asciidoc
index 83ac29d0bf7c9..8d23101aba7f3 100644
--- a/docs/reference/esql/functions/parameters/least.asciidoc
+++ b/docs/reference/esql/functions/parameters/least.asciidoc
@@ -3,7 +3,7 @@
*Parameters*
`first`::
-
+First of the columns to evaluate.
`rest`::
-
+The rest of the columns to evaluate.
diff --git a/docs/reference/esql/functions/parameters/now.asciidoc b/docs/reference/esql/functions/parameters/now.asciidoc
new file mode 100644
index 0000000000000..25b3c973f1a26
--- /dev/null
+++ b/docs/reference/esql/functions/parameters/now.asciidoc
@@ -0,0 +1,3 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Parameters*
diff --git a/docs/reference/esql/functions/parameters/st_contains.asciidoc b/docs/reference/esql/functions/parameters/st_contains.asciidoc
index e87a0d0eb94f0..2f969f0f3cf05 100644
--- a/docs/reference/esql/functions/parameters/st_contains.asciidoc
+++ b/docs/reference/esql/functions/parameters/st_contains.asciidoc
@@ -3,7 +3,7 @@
*Parameters*
`geomA`::
-Geometry column name or variable of geometry type
+Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
`geomB`::
-Geometry column name or variable of geometry type
+Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters.
diff --git a/docs/reference/esql/functions/parameters/st_disjoint.asciidoc b/docs/reference/esql/functions/parameters/st_disjoint.asciidoc
index e87a0d0eb94f0..2f969f0f3cf05 100644
--- a/docs/reference/esql/functions/parameters/st_disjoint.asciidoc
+++ b/docs/reference/esql/functions/parameters/st_disjoint.asciidoc
@@ -3,7 +3,7 @@
*Parameters*
`geomA`::
-Geometry column name or variable of geometry type
+Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
`geomB`::
-Geometry column name or variable of geometry type
+Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters.
diff --git a/docs/reference/esql/functions/parameters/st_intersects.asciidoc b/docs/reference/esql/functions/parameters/st_intersects.asciidoc
index e87a0d0eb94f0..2f969f0f3cf05 100644
--- a/docs/reference/esql/functions/parameters/st_intersects.asciidoc
+++ b/docs/reference/esql/functions/parameters/st_intersects.asciidoc
@@ -3,7 +3,7 @@
*Parameters*
`geomA`::
-Geometry column name or variable of geometry type
+Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
`geomB`::
-Geometry column name or variable of geometry type
+Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters.
diff --git a/docs/reference/esql/functions/parameters/st_within.asciidoc b/docs/reference/esql/functions/parameters/st_within.asciidoc
index e87a0d0eb94f0..2f969f0f3cf05 100644
--- a/docs/reference/esql/functions/parameters/st_within.asciidoc
+++ b/docs/reference/esql/functions/parameters/st_within.asciidoc
@@ -3,7 +3,7 @@
*Parameters*
`geomA`::
-Geometry column name or variable of geometry type
+Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
`geomB`::
-Geometry column name or variable of geometry type
+Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters.
diff --git a/docs/reference/esql/functions/parameters/st_x.asciidoc b/docs/reference/esql/functions/parameters/st_x.asciidoc
index 4e8e77dea1f86..b66bfc286a443 100644
--- a/docs/reference/esql/functions/parameters/st_x.asciidoc
+++ b/docs/reference/esql/functions/parameters/st_x.asciidoc
@@ -3,4 +3,4 @@
*Parameters*
`point`::
-
+Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`.
diff --git a/docs/reference/esql/functions/parameters/st_y.asciidoc b/docs/reference/esql/functions/parameters/st_y.asciidoc
index 4e8e77dea1f86..b66bfc286a443 100644
--- a/docs/reference/esql/functions/parameters/st_y.asciidoc
+++ b/docs/reference/esql/functions/parameters/st_y.asciidoc
@@ -3,4 +3,4 @@
*Parameters*
`point`::
-
+Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`.
diff --git a/docs/reference/esql/functions/signature/now.svg b/docs/reference/esql/functions/signature/now.svg
new file mode 100644
index 0000000000000..2cd48ac561408
--- /dev/null
+++ b/docs/reference/esql/functions/signature/now.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/spatial-functions.asciidoc b/docs/reference/esql/functions/spatial-functions.asciidoc
index b6d178ddd624d..d143681fcf2f2 100644
--- a/docs/reference/esql/functions/spatial-functions.asciidoc
+++ b/docs/reference/esql/functions/spatial-functions.asciidoc
@@ -16,9 +16,9 @@
* experimental:[] <>
// end::spatial_list[]
-include::st_intersects.asciidoc[]
-include::st_disjoint.asciidoc[]
-include::st_contains.asciidoc[]
-include::st_within.asciidoc[]
-include::st_x.asciidoc[]
-include::st_y.asciidoc[]
+include::layout/st_intersects.asciidoc[]
+include::layout/st_disjoint.asciidoc[]
+include::layout/st_contains.asciidoc[]
+include::layout/st_within.asciidoc[]
+include::layout/st_x.asciidoc[]
+include::layout/st_y.asciidoc[]
diff --git a/docs/reference/esql/functions/st_contains.asciidoc b/docs/reference/esql/functions/st_contains.asciidoc
deleted file mode 100644
index 110c4fe4ca9ec..0000000000000
--- a/docs/reference/esql/functions/st_contains.asciidoc
+++ /dev/null
@@ -1,26 +0,0 @@
-[discrete]
-[[esql-st_contains]]
-=== `ST_CONTAINS`
-
-experimental::[]
-
-*Syntax*
-
-[.text-center]
-image::esql/functions/signature/st_contains.svg[Embedded,opts=inline]
-
-*Parameters*
-
-`geomA`::
-Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
-
-`geomB`::
-Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
-The second parameter must also have the same coordinate system as the first.
-This means it is not possible to combine `geo_*` and `cartesian_*` parameters.
-
-include::description/st_contains.asciidoc[]
-This is the inverse of the <> function.
-
-include::types/st_contains.asciidoc[]
-include::examples/st_contains.asciidoc[]
diff --git a/docs/reference/esql/functions/st_disjoint.asciidoc b/docs/reference/esql/functions/st_disjoint.asciidoc
deleted file mode 100644
index db89ca186a0ff..0000000000000
--- a/docs/reference/esql/functions/st_disjoint.asciidoc
+++ /dev/null
@@ -1,27 +0,0 @@
-[discrete]
-[[esql-st_disjoint]]
-=== `ST_DISJOINT`
-
-experimental::[]
-
-*Syntax*
-
-[.text-center]
-image::esql/functions/signature/st_disjoint.svg[Embedded,opts=inline]
-
-*Parameters*
-
-`geomA`::
-Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
-
-`geomB`::
-Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
-The second parameter must also have the same coordinate system as the first.
-This means it is not possible to combine `geo_*` and `cartesian_*` parameters.
-
-include::description/st_disjoint.asciidoc[]
-This is the inverse of the <> function.
-In mathematical terms: ST_Disjoint(A, B) ⇔ A ⋂ B = ∅
-
-include::types/st_disjoint.asciidoc[]
-include::examples/st_disjoint.asciidoc[]
diff --git a/docs/reference/esql/functions/st_intersects.asciidoc b/docs/reference/esql/functions/st_intersects.asciidoc
deleted file mode 100644
index d75a7f3a50e0f..0000000000000
--- a/docs/reference/esql/functions/st_intersects.asciidoc
+++ /dev/null
@@ -1,31 +0,0 @@
-[discrete]
-[[esql-st_intersects]]
-=== `ST_INTERSECTS`
-
-experimental::[]
-
-*Syntax*
-
-[.text-center]
-image::esql/functions/signature/st_intersects.svg[Embedded,opts=inline]
-
-*Parameters*
-
-`geomA`::
-Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
-
-`geomB`::
-Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
-The second parameter must also have the same coordinate system as the first.
-This means it is not possible to combine `geo_*` and `cartesian_*` parameters.
-
-*Description*
-
-Returns true if two geometries intersect.
-They intersect if they have any point in common, including their interior points
-(points along lines or within polygons).
-This is the inverse of the <> function.
-In mathematical terms: ST_Intersects(A, B) ⇔ A ⋂ B ≠ ∅
-
-include::types/st_intersects.asciidoc[]
-include::examples/st_intersects.asciidoc[]
diff --git a/docs/reference/esql/functions/st_within.asciidoc b/docs/reference/esql/functions/st_within.asciidoc
deleted file mode 100644
index 0f0190a9de638..0000000000000
--- a/docs/reference/esql/functions/st_within.asciidoc
+++ /dev/null
@@ -1,26 +0,0 @@
-[discrete]
-[[esql-st_within]]
-=== `ST_WITHIN`
-
-experimental::[]
-
-*Syntax*
-
-[.text-center]
-image::esql/functions/signature/st_within.svg[Embedded,opts=inline]
-
-*Parameters*
-
-`geomA`::
-Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
-
-`geomB`::
-Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`.
-The second parameter must also have the same coordinate system as the first.
-This means it is not possible to combine `geo_*` and `cartesian_*` parameters.
-
-include::description/st_within.asciidoc[]
-This is the inverse of the <> function.
-
-include::types/st_within.asciidoc[]
-include::examples/st_within.asciidoc[]
diff --git a/docs/reference/esql/functions/st_x.asciidoc b/docs/reference/esql/functions/st_x.asciidoc
deleted file mode 100644
index eec48894b5150..0000000000000
--- a/docs/reference/esql/functions/st_x.asciidoc
+++ /dev/null
@@ -1,33 +0,0 @@
-[discrete]
-[[esql-st_x]]
-=== `ST_X`
-
-experimental::[]
-
-*Syntax*
-
-[.text-center]
-image::esql/functions/signature/st_x.svg[Embedded,opts=inline]
-
-*Parameters*
-
-`point`::
-Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`.
-
-*Description*
-
-Extracts the `x` coordinate from the supplied point.
-If the points is of type `geo_point` this is equivalent to extracting the `longitude` value.
-
-include::types/st_x.asciidoc[]
-
-*Example*
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/spatial.csv-spec[tag=st_x_y]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/spatial.csv-spec[tag=st_x_y-result]
-|===
diff --git a/docs/reference/esql/functions/st_y.asciidoc b/docs/reference/esql/functions/st_y.asciidoc
deleted file mode 100644
index 8fc7281e395d2..0000000000000
--- a/docs/reference/esql/functions/st_y.asciidoc
+++ /dev/null
@@ -1,33 +0,0 @@
-[discrete]
-[[esql-st_y]]
-=== `ST_Y`
-
-experimental::[]
-
-*Syntax*
-
-[.text-center]
-image::esql/functions/signature/st_y.svg[Embedded,opts=inline]
-
-*Parameters*
-
-`point`::
-Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`.
-
-*Description*
-
-Extracts the `y` coordinate from the supplied point.
-If the points is of type `geo_point` this is equivalent to extracting the `latitude` value.
-
-include::types/st_y.asciidoc[]
-
-*Example*
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/spatial.csv-spec[tag=st_x_y]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/spatial.csv-spec[tag=st_x_y-result]
-|===
diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc
index e7d627ab915a1..85e4193b5bf2f 100644
--- a/docs/reference/esql/functions/types/case.asciidoc
+++ b/docs/reference/esql/functions/types/case.asciidoc
@@ -5,5 +5,15 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
condition | trueValue | result
-keyword
+boolean | boolean | boolean
+boolean | cartesian_point | cartesian_point
+boolean | datetime | datetime
+boolean | double | double
+boolean | geo_point | geo_point
+boolean | integer | integer
+boolean | ip | ip
+boolean | long | long
+boolean | text | text
+boolean | unsigned_long | unsigned_long
+boolean | version | version
|===
diff --git a/docs/reference/esql/functions/types/now.asciidoc b/docs/reference/esql/functions/types/now.asciidoc
new file mode 100644
index 0000000000000..5737d98f2f7db
--- /dev/null
+++ b/docs/reference/esql/functions/types/now.asciidoc
@@ -0,0 +1,9 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Supported types*
+
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+result
+datetime
+|===
diff --git a/docs/reference/esql/index.asciidoc b/docs/reference/esql/index.asciidoc
index 5cb02064dc794..54627a6de3c62 100644
--- a/docs/reference/esql/index.asciidoc
+++ b/docs/reference/esql/index.asciidoc
@@ -6,8 +6,6 @@
[partintro]
-preview::["Do not use {esql} on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]
-
The {es} Query Language ({esql}) provides a powerful way to filter, transform,
and analyze data stored in {es}, and in the future in other runtimes. It is
designed to be easy to learn and use, by end users, SRE teams, application
diff --git a/docs/reference/esql/processing-commands/enrich.asciidoc b/docs/reference/esql/processing-commands/enrich.asciidoc
index f73eea6018cbc..5470d81b2f40b 100644
--- a/docs/reference/esql/processing-commands/enrich.asciidoc
+++ b/docs/reference/esql/processing-commands/enrich.asciidoc
@@ -57,11 +57,11 @@ in this example). `ENRICH` will look for records in the
[source.merge.styled,esql]
----
-include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich]
+include::{esql-specs}/enrich.csv-spec[tag=enrich]
----
[%header.monospaced.styled,format=dsv,separator=|]
|===
-include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich-result]
+include::{esql-specs}/enrich.csv-spec[tag=enrich-result]
|===
To use a column with a different name than the `match_field` defined in the
@@ -69,11 +69,11 @@ policy as the match field, use `ON `:
[source.merge.styled,esql]
----
-include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich_on]
+include::{esql-specs}/enrich.csv-spec[tag=enrich_on]
----
[%header.monospaced.styled,format=dsv,separator=|]
|===
-include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich_on-result]
+include::{esql-specs}/enrich.csv-spec[tag=enrich_on-result]
|===
By default, each of the enrich fields defined in the policy is added as a
@@ -82,22 +82,22 @@ column. To explicitly select the enrich fields that are added, use
[source.merge.styled,esql]
----
-include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich_with]
+include::{esql-specs}/enrich.csv-spec[tag=enrich_with]
----
[%header.monospaced.styled,format=dsv,separator=|]
|===
-include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich_with-result]
+include::{esql-specs}/enrich.csv-spec[tag=enrich_with-result]
|===
You can rename the columns that are added using `WITH new_name=`:
[source.merge.styled,esql]
----
-include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich_rename]
+include::{esql-specs}/enrich.csv-spec[tag=enrich_rename]
----
[%header.monospaced.styled,format=dsv,separator=|]
|===
-include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich_rename-result]
+include::{esql-specs}/enrich.csv-spec[tag=enrich_rename-result]
|===
In case of name collisions, the newly created columns will override existing
diff --git a/docs/reference/esql/processing-commands/mv_expand.asciidoc b/docs/reference/esql/processing-commands/mv_expand.asciidoc
index 46dc4fd0a33cf..9e1cb5573c381 100644
--- a/docs/reference/esql/processing-commands/mv_expand.asciidoc
+++ b/docs/reference/esql/processing-commands/mv_expand.asciidoc
@@ -2,6 +2,8 @@
[[esql-mv_expand]]
=== `MV_EXPAND`
+preview::[]
+
**Syntax**
[source,esql]
diff --git a/docs/reference/high-availability/cluster-design.asciidoc b/docs/reference/high-availability/cluster-design.asciidoc
index 3f8e19b47d37a..6c17a494f36ae 100644
--- a/docs/reference/high-availability/cluster-design.asciidoc
+++ b/docs/reference/high-availability/cluster-design.asciidoc
@@ -7,14 +7,14 @@ nodes to take over their responsibilities, an {es} cluster can continue
operating normally if some of its nodes are unavailable or disconnected.
There is a limit to how small a resilient cluster can be. All {es} clusters
-require:
+require the following components to function:
-- One <> node
-- At least one node for each <>.
-- At least one copy of every <>.
+- One <>
+- At least one node for each <>
+- At least one copy of every <>
A resilient cluster requires redundancy for every required cluster component.
-This means a resilient cluster must have:
+This means a resilient cluster must have the following components:
- At least three master-eligible nodes
- At least two nodes of each role
@@ -375,11 +375,11 @@ The cluster will be resilient to the loss of any zone as long as:
- There are at least two zones containing data nodes.
- Every index that is not a <>
has at least one replica of each shard, in addition to the primary.
-- Shard allocation awareness is configured to avoid concentrating all copies of
- a shard within a single zone.
+- <> is configured to
+ avoid concentrating all copies of a shard within a single zone.
- The cluster has at least three master-eligible nodes. At least two of these
- nodes are not voting-only master-eligible nodes, and they are spread evenly
- across at least three zones.
+ nodes are not <>,
+ and they are spread evenly across at least three zones.
- Clients are configured to send their requests to nodes in more than one zone
or are configured to use a load balancer that balances the requests across an
appropriate set of nodes. The {ess-trial}[Elastic Cloud] service provides such
diff --git a/docs/reference/images/shard-allocation/shard-allocation-awareness-one-rack.png b/docs/reference/images/shard-allocation/shard-allocation-awareness-one-rack.png
new file mode 100644
index 0000000000000..d5a3040cc5343
Binary files /dev/null and b/docs/reference/images/shard-allocation/shard-allocation-awareness-one-rack.png differ
diff --git a/docs/reference/images/shard-allocation/shard-allocation-awareness-two-racks.png b/docs/reference/images/shard-allocation/shard-allocation-awareness-two-racks.png
new file mode 100644
index 0000000000000..ce2ce6b2a95e9
Binary files /dev/null and b/docs/reference/images/shard-allocation/shard-allocation-awareness-two-racks.png differ
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index e47304f1e1337..2057519719177 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -10,7 +10,7 @@ include::intro.asciidoc[]
include::release-notes/highlights.asciidoc[]
-include::getting-started.asciidoc[]
+include::quickstart/index.asciidoc[]
include::setup.asciidoc[]
diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc
index 2e678b929d296..89eb6e8559056 100644
--- a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc
+++ b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc
@@ -27,12 +27,7 @@ TIP: If you have created rules for specific {anomaly-jobs} and you want to
monitor whether these jobs work as expected, {anomaly-jobs} health rules are
ideal for this purpose.
-In *{stack-manage-app} > {rules-ui}*, you can create both types of {ml} rules:
-
-[role="screenshot"]
-image::images/ml-rule.png["Creating a new machine learning rule",500]
-// NOTE: This is an autogenerated screenshot. Do not edit it directly.
-
+In *{stack-manage-app} > {rules-ui}*, you can create both types of {ml} rules.
In the *{ml-app}* app, you can create only {anomaly-detect} alert rules; create
them from the {anomaly-job} wizard after you start the job or from the
{anomaly-job} list.
diff --git a/docs/reference/ml/images/ml-rule.png b/docs/reference/ml/images/ml-rule.png
deleted file mode 100644
index f7ebcb3716b81..0000000000000
Binary files a/docs/reference/ml/images/ml-rule.png and /dev/null differ
diff --git a/docs/reference/modules/cluster/allocation_awareness.asciidoc b/docs/reference/modules/cluster/allocation_awareness.asciidoc
index d447026fae293..9c6197f9ba40d 100644
--- a/docs/reference/modules/cluster/allocation_awareness.asciidoc
+++ b/docs/reference/modules/cluster/allocation_awareness.asciidoc
@@ -5,7 +5,7 @@ You can use custom node attributes as _awareness attributes_ to enable {es}
to take your physical hardware configuration into account when allocating shards.
If {es} knows which nodes are on the same physical server, in the same rack, or
in the same zone, it can distribute the primary shard and its replica shards to
-minimise the risk of losing all shard copies in the event of a failure.
+minimize the risk of losing all shard copies in the event of a failure.
When shard allocation awareness is enabled with the
<>
@@ -19,22 +19,27 @@ allocated in each location. If the number of nodes in each location is
unbalanced and there are a lot of replicas, replica shards might be left
unassigned.
+TIP: Learn more about <>.
+
[[enabling-awareness]]
===== Enabling shard allocation awareness
To enable shard allocation awareness:
-. Specify the location of each node with a custom node attribute. For example,
-if you want Elasticsearch to distribute shards across different racks, you might
-set an awareness attribute called `rack_id` in each node's `elasticsearch.yml`
-config file.
+. Specify the location of each node with a custom node attribute. For example,
+if you want Elasticsearch to distribute shards across different racks, you might
+use an awareness attribute called `rack_id`.
++
+You can set custom attributes in two ways:
+
+- By editing the `elasticsearch.yml` config file:
+
[source,yaml]
--------------------------------------------------------
node.attr.rack_id: rack_one
--------------------------------------------------------
+
-You can also set custom attributes when you start a node:
+- Using the `-E` command line argument when you start a node:
+
[source,sh]
--------------------------------------------------------
@@ -56,17 +61,33 @@ cluster.routing.allocation.awareness.attributes: rack_id <1>
+
You can also use the
<> API to set or update
-a cluster's awareness attributes.
+a cluster's awareness attributes:
++
+[source,console]
+--------------------------------------------------
+PUT /_cluster/settings
+{
+ "persistent" : {
+ "cluster.routing.allocation.awareness.attributes" : "rack_id"
+ }
+}
+--------------------------------------------------
With this example configuration, if you start two nodes with
`node.attr.rack_id` set to `rack_one` and create an index with 5 primary
shards and 1 replica of each primary, all primaries and replicas are
-allocated across the two nodes.
+allocated across the two nodes.
+
+.All primaries and replicas allocated across two nodes in the same rack
+image::images/shard-allocation/shard-allocation-awareness-one-rack.png[All primaries and replicas are allocated across two nodes in the same rack]
If you add two nodes with `node.attr.rack_id` set to `rack_two`,
{es} moves shards to the new nodes, ensuring (if possible)
that no two copies of the same shard are in the same rack.
+.Primaries and replicas allocated across four nodes in two racks, with no two copies of the same shard in the same rack
+image::images/shard-allocation/shard-allocation-awareness-two-racks.png[Primaries and replicas are allocated across four nodes in two racks with no two copies of the same shard in the same rack]
+
If `rack_two` fails and takes down both its nodes, by default {es}
allocates the lost shard copies to nodes in `rack_one`. To prevent multiple
copies of a particular shard from being allocated in the same location, you can
diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc
index 848a29c64279c..2308ec259da48 100644
--- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc
+++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc
@@ -59,35 +59,40 @@ you configure the remotes.
`cluster.remote..transport.compress`::
- Per cluster setting that enables you to configure compression for requests
- to a specific remote cluster. This setting impacts only requests
- sent to the remote cluster. If the inbound request is compressed,
- Elasticsearch compresses the response. The setting options are `true`,
- `indexing_data`, and `false`. If unset, the global `transport.compress` is
- used as the fallback setting.
+ Per-cluster setting that enables you to configure compression for requests to
+ a specific remote cluster. The handling cluster will automatically compress
+ responses to compressed requests. The setting options are `true`,
+ `indexing_data`, and `false`. If unset, defaults to the behavior specified
+ by the node-wide `transport.compress` setting. See the
+ <> for further information.
`cluster.remote..transport.compression_scheme`::
- Per cluster setting that enables you to configure compression scheme for
- requests to a specific remote cluster. This setting impacts only requests
- sent to the remote cluster. If an inbound request is compressed, {es}
- compresses the response using the same compression scheme. The setting options
- are `deflate` and `lz4`. If unset, the global `transport.compression_scheme`
- is used as the fallback setting.
+ Per-cluster setting that enables you to configure the compression scheme for
+ requests to a specific cluster if those requests are selected for
+ compression by the `cluster.remote..transport.compress`
+ setting. The handling cluster will automatically use the same compression
+ scheme for responses as for the corresponding requests. The setting options
+ are `deflate` and `lz4`. If unset, defaults to the behavior specified by the
+ node-wide `transport.compression_scheme` setting. See the
+ <> for further information.
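+
+Because remote cluster settings are dynamic, they can also be updated on a
+running cluster through the cluster update settings API. The following is a
+minimal sketch using the Python {es} client; the remote cluster alias
+`my_remote` and the client connection details are assumptions for
+illustration only:
+
+[source,py]
+----
+from elasticsearch import Elasticsearch
+
+client = Elasticsearch("http://localhost:9200")  # assumed connection details
+
+# Compress only raw indexing data sent to the remote cluster "my_remote",
+# using the lz4 scheme.
+client.cluster.put_settings(
+    persistent={
+        "cluster.remote.my_remote.transport.compress": "indexing_data",
+        "cluster.remote.my_remote.transport.compression_scheme": "lz4",
+    }
+)
+----
+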
-
-`cluster.remote..credentials` (<>, <>)::
[[remote-cluster-credentials-setting]]
-
- Per cluster setting for configuring <>.
- This setting takes the encoded value of a
- <> and must be set
- in the <> on each node in the cluster.
- The presence (or not) of this setting determines which model a remote cluster uses.
- If present, the remote cluster uses the API key based model.
- Otherwise, it uses the certificate based model.
- If the setting is added, removed, or updated in the <> and reloaded via the
- <> API, the cluster will automatically rebuild its connection to the remote.
+`cluster.remote..credentials`::
+
+ (<>, <>)
+ Per-cluster setting for configuring <>. This setting takes the encoded value of a
+ <> and must
+ be set in the <> on each node in the cluster.
+ The presence (or not) of this setting determines which model a remote cluster
+ uses. If present, the remote cluster uses the API key based model. Otherwise,
+ it uses the certificate based model. If the setting is added, removed, or
+ updated in the <> and reloaded via the
+ <> API, the cluster will automatically
+ rebuild its connection to the remote.
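+
+For example, after adding or updating the credential in each node's keystore,
+you can reload the secure settings without a full restart. A minimal sketch
+with the Python {es} client (the connection details are an assumption for
+illustration):
+
+[source,py]
+----
+from elasticsearch import Elasticsearch
+
+client = Elasticsearch("http://localhost:9200")  # assumed connection details
+
+# Reload secure settings on all nodes so the updated remote cluster
+# credential takes effect and the remote connection is rebuilt.
+client.nodes.reload_secure_settings()
+----
+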
[[remote-cluster-sniff-settings]]
==== Sniff mode remote cluster settings
diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc
index 2ec574544f9bb..d08da2cfc1d2f 100644
--- a/docs/reference/modules/transport.asciidoc
+++ b/docs/reference/modules/transport.asciidoc
@@ -47,20 +47,44 @@ different from `transport.port`. Defaults to the port assigned via
The connect timeout for initiating a new connection (in
time setting format). Defaults to `30s`.
+[[transport-settings-compress]]
`transport.compress`::
(<>, string)
-Set to `true`, `indexing_data`, or `false` to configure transport compression
-between nodes. The option `true` will compress all data. The option
-`indexing_data` will compress only the raw index data sent between nodes during
-ingest, ccr following (excluding bootstrap), and operations based shard recovery
-(excluding transferring lucene files). Defaults to `indexing_data`.
+Determines which transport requests are compressed before sending them to
+another node. {es} will compress transport responses if and only if the
+corresponding request was compressed. See also `transport.compression_scheme`,
+which specifies the compression scheme that is used. Accepts the following
+values:
++
+--
+`false`::
+
+No transport requests are compressed. This option uses the most network
+bandwidth, but avoids the CPU overhead of compression and decompression.
+
+`indexing_data`::
+
+Compresses only the raw indexing data sent between nodes during ingest, CCR
+following (excluding bootstrapping) and operations-based shard recovery
+(excluding file-based recovery which copies the raw Lucene data). This option
+is a good trade-off between network bandwidth savings and the extra CPU
+required for compression and decompression. This option is the default.
+
+`true`::
+
+All transport requests are compressed. This option may perform better than
+`indexing_data` in terms of network bandwidth, but will require the most CPU
+for compression and decompression work.
+--
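+
+One way to confirm which value is in effect on a running cluster is to read
+each node's settings back through the nodes info API. A minimal sketch using
+the Python {es} client (the connection details are an assumption for
+illustration):
+
+[source,py]
+----
+from elasticsearch import Elasticsearch
+
+client = Elasticsearch("http://localhost:9200")  # assumed connection details
+
+# Show the effective transport compression settings of every node.
+print(client.nodes.info(filter_path="nodes.*.settings.transport"))
+----
+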
+[[transport-settings-compression-scheme]]
`transport.compression_scheme`::
(<>, string)
-Configures the compression scheme for `transport.compress`. The options are
-`deflate` or `lz4`. If `lz4` is configured and the remote node has not been
-upgraded to a version supporting `lz4`, the traffic will be sent uncompressed.
-Defaults to `lz4`.
+Configures the compression scheme for requests which are selected for
+compression by the `transport.compress` setting. Accepts either `deflate` or
+`lz4`, which offer different trade-offs between compression ratio and CPU
+usage. {es} will use the same compression scheme for responses as for the
+corresponding requests. Defaults to `lz4`.
`transport.tcp.keep_alive`::
(<>, boolean)
diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/quickstart/getting-started.asciidoc
similarity index 98%
rename from docs/reference/getting-started.asciidoc
rename to docs/reference/quickstart/getting-started.asciidoc
index 2a5dbc2f0d031..6b3095e07f9d4 100644
--- a/docs/reference/getting-started.asciidoc
+++ b/docs/reference/quickstart/getting-started.asciidoc
@@ -1,10 +1,9 @@
-[chapter]
[[getting-started]]
-= Quick start
+== Quick start guide
This guide helps you learn how to:
-* install and run {es} and {kib} (using {ecloud} or Docker),
+* Run {es} and {kib} (using {ecloud} or in a local Docker dev environment),
* add simple (non-timestamped) dataset to {es},
* run basic searches.
diff --git a/docs/reference/quickstart/index.asciidoc b/docs/reference/quickstart/index.asciidoc
new file mode 100644
index 0000000000000..e517d039e620b
--- /dev/null
+++ b/docs/reference/quickstart/index.asciidoc
@@ -0,0 +1,10 @@
+[[quickstart]]
+= Quickstart
+
+Get started quickly with {es}.
+
+* Learn how to run {es} (and {kib}) for <>.
+* Follow our <> to add data to {es} and query it.
+
+include::run-elasticsearch-locally.asciidoc[]
+include::getting-started.asciidoc[]
diff --git a/docs/reference/quickstart/run-elasticsearch-locally.asciidoc b/docs/reference/quickstart/run-elasticsearch-locally.asciidoc
new file mode 100644
index 0000000000000..0db395ba34b0a
--- /dev/null
+++ b/docs/reference/quickstart/run-elasticsearch-locally.asciidoc
@@ -0,0 +1,175 @@
+[[run-elasticsearch-locally]]
+== Run {es} locally in Docker (without security)
+++++
+Local dev setup (Docker)
+++++
+
+[WARNING]
+====
+*DO NOT USE THESE INSTRUCTIONS FOR PRODUCTION DEPLOYMENTS*
+
+The instructions on this page are for *local development only*. Do not use this configuration for production deployments, because it is not secure.
+While this approach is convenient for experimenting and learning, it is not suitable for a production environment.
+====
+
+The following commands help you quickly spin up a single-node {es} cluster, together with {kib}, in Docker.
+If you don't need the {kib} UI, you can skip the {kib} steps.
+
+[discrete]
+[[local-dev-why]]
+=== When would I use this setup?
+
+Use this setup if you want to quickly spin up {es} (and {kib}) for local development or testing.
+
+For example you might:
+
+* Want to run a quick test to see how a feature works.
+* Follow a tutorial or guide that requires an {es} cluster, like our <>.
+* Experiment with the {es} APIs using different tools, like the Dev Tools Console, cURL, or an Elastic programming language client.
+* Quickly spin up an {es} cluster to test an executable https://github.com/elastic/elasticsearch-labs/tree/main/notebooks#readme[Python notebook] locally.
+
+[discrete]
+[[local-dev-prerequisites]]
+=== Prerequisites
+
+If you don't have Docker installed, https://www.docker.com/products/docker-desktop[download and install Docker Desktop] for your operating system.
+
+[discrete]
+[[local-dev-env-vars]]
+=== Set environment variables
+
+Configure the following environment variables.
+
+[source,sh]
+----
+export ELASTIC_PASSWORD="" # Password for the "elastic" username
+export KIBANA_PASSWORD="" # Used _internally_ by Kibana, must be at least 6 characters long
+----
+
+[discrete]
+[[local-dev-create-docker-network]]
+=== Create a Docker network
+
+To run both {es} and {kib}, you'll need to create a Docker network:
+
+[source,sh]
+----
+docker network create elastic-net
+----
+
+[discrete]
+[[local-dev-run-es]]
+=== Run {es}
+
+Start the {es} container with the following command:
+
+ifeval::["{release-state}"=="unreleased"]
+WARNING: Version {version} has not yet been released.
+No Docker image is currently available for {es} {version}.
+endif::[]
+
+[source,sh,subs="attributes"]
+----
+docker run -p 127.0.0.1:9200:9200 -d --name elasticsearch --network elastic-net \
+ -e ELASTIC_PASSWORD=$ELASTIC_PASSWORD \
+ -e "discovery.type=single-node" \
+ -e "xpack.security.http.ssl.enabled=false" \
+ -e "xpack.license.self_generated.type=trial" \
+ {docker-image}
+----
+
+[discrete]
+[[local-dev-run-kib]]
+=== Run {kib} (optional)
+
+To run {kib}, you must first set the `kibana_system` password in the {es} container.
+
+[source,sh,subs="attributes"]
+----
+# configure the Kibana password in the ES container
+curl -u elastic:$ELASTIC_PASSWORD \
+ -X POST \
+ http://localhost:9200/_security/user/kibana_system/_password \
+ -d '{"password":"'"$KIBANA_PASSWORD"'"}' \
+ -H 'Content-Type: application/json'
+----
+// NOTCONSOLE
+
+Start the {kib} container with the following command:
+
+ifeval::["{release-state}"=="unreleased"]
+WARNING: Version {version} has not yet been released.
+No Docker image is currently available for {es} {version}.
+endif::[]
+
+[source,sh,subs="attributes"]
+----
+docker run -p 127.0.0.1:5601:5601 -d --name kibana --network elastic-net \
+ -e ELASTICSEARCH_URL=http://elasticsearch:9200 \
+ -e ELASTICSEARCH_HOSTS=http://elasticsearch:9200 \
+ -e ELASTICSEARCH_USERNAME=kibana_system \
+ -e ELASTICSEARCH_PASSWORD=$KIBANA_PASSWORD \
+ -e "xpack.security.enabled=false" \
+ -e "xpack.license.self_generated.type=trial" \
+ {kib-docker-image}
+----
+
+[NOTE]
+====
+The service is started with a trial license. The trial license enables all features of Elasticsearch for a trial period of 30 days. After the trial period expires, the license is downgraded to a basic license, which is free forever. If you prefer to skip the trial and use the basic license, set the value of the `xpack.license.self_generated.type` variable to `basic` instead. For a detailed feature comparison between the different licenses, refer to our https://www.elastic.co/subscriptions[subscriptions page].
+====
+
+[discrete]
+[[local-dev-connecting-clients]]
+=== Connecting to {es} with language clients
+
+To connect to the {es} cluster from a language client, you can use basic authentication with the `elastic` username and the password you set in the environment variable.
+
+You'll use the following connection details:
+
+* **{es} endpoint**: `http://localhost:9200`
+* **Username**: `elastic`
+* **Password**: `$ELASTIC_PASSWORD` (Value you set in the environment variable)
+
+For example, to connect with the Python `elasticsearch` client:
+
+[source,python]
+----
+import os
+from elasticsearch import Elasticsearch
+
+username = 'elastic'
+password = os.getenv('ELASTIC_PASSWORD') # Value you set in the environment variable
+
+client = Elasticsearch(
+ "http://localhost:9200",
+ basic_auth=(username, password)
+)
+
+print(client.info())
+----
+
+Here's an example curl command using basic authentication:
+
+[source,sh,subs="attributes"]
+----
+curl -u elastic:$ELASTIC_PASSWORD \
+ -X PUT \
+ http://localhost:9200/my-new-index \
+ -H 'Content-Type: application/json'
+----
+// NOTCONSOLE
+
+[discrete]
+[[local-dev-next-steps]]
+=== Next steps
+
+Use our <> to learn the basics of {es}: how to add data and query it.
+
+[discrete]
+[[local-dev-production]]
+=== Moving to production
+
+This setup is not suitable for production use. For production deployments, we recommend using our managed service on Elastic Cloud. https://cloud.elastic.co/registration[Sign up for a free trial] (no credit card required).
+
+Otherwise, refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html[Install {es}] to learn about the various options for installing {es} in a self-managed production environment, including using Docker.
diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc
index dd264c0e5bcd2..a2a397c4efe65 100644
--- a/docs/reference/rest-api/common-parms.asciidoc
+++ b/docs/reference/rest-api/common-parms.asciidoc
@@ -1062,8 +1062,8 @@ end::stats[]
tag::stored_fields[]
`stored_fields`::
-(Optional, Boolean) If `true`, retrieves the document fields stored in the
-index rather than the document `_source`. Defaults to `false`.
+(Optional, string)
+A comma-separated list of <> to include in the response.
end::stored_fields[]
tag::sync[]
diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc
index 6bdfaab17a4d0..0d21f648ab58b 100644
--- a/docs/reference/rest-api/usage.asciidoc
+++ b/docs/reference/rest-api/usage.asciidoc
@@ -308,7 +308,8 @@ GET /_xpack/usage
},
"rollup" : {
"available" : true,
- "enabled" : true
+ "enabled" : true,
+ ...
},
"ilm" : {
"policy_count" : 3,
@@ -496,6 +497,7 @@ GET /_xpack/usage
}
------------------------------------------------------------
// TESTRESPONSE[s/"security" : \{[^\}]*\},/"security" : $body.$_path,/]
+// TESTRESPONSE[s/"rollup" : \{[^\}]*\},/"rollup" : $body.$_path,/]
// TESTRESPONSE[s/"detectors" : \{[^\}]*\},/"detectors" : $body.$_path,/]
// TESTRESPONSE[s/"model_size" : \{[^\}]*\},/"model_size" : $body.$_path,/]
// TESTRESPONSE[s/"eql" : \{[^\}]*\},/"eql" : $body.$_path,/]
diff --git a/docs/reference/search/search-your-data/cohere-es.asciidoc b/docs/reference/search/search-your-data/cohere-es.asciidoc
new file mode 100644
index 0000000000000..f12f23ad2c5dc
--- /dev/null
+++ b/docs/reference/search/search-your-data/cohere-es.asciidoc
@@ -0,0 +1,372 @@
+[[cohere-es]]
+=== Tutorial: Using Cohere with {es}
+++++
+Using Cohere with {es}
+++++
+
+This tutorial shows you how to compute embeddings with
+Cohere using the {infer} API and store them for efficient vector or hybrid
+search in {es}. It uses the Python {es} client to perform the
+operations.
+
+You'll learn how to:
+
+* create an {infer} endpoint for text embedding using the Cohere service,
+* create the necessary index mapping for the {es} index,
+* build an {infer} pipeline to ingest documents into the index together with the
+embeddings,
+* perform hybrid search on the data,
+* rerank search results by using Cohere's rerank model,
+* design a RAG system with Cohere's Chat API.
+
+The tutorial uses the https://huggingface.co/datasets/mteb/scifact[SciFact] data
+set.
+
+Refer to https://docs.cohere.com/docs/elasticsearch-and-cohere[Cohere's tutorial]
+for an example using a different data set.
+
+
+[discrete]
+[[cohere-es-req]]
+==== Requirements
+
+* A https://cohere.com/[Cohere account],
+* an https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html[Elastic Cloud]
+account,
+* Python 3.7 or higher.
+
+
+[discrete]
+[[cohere-es-packages]]
+==== Install required packages
+
+Install {es} and Cohere:
+
+[source,py]
+------------------------------------------------------------
+!pip install elasticsearch
+!pip install cohere
+------------------------------------------------------------
+
+Import the required packages:
+
+[source,py]
+------------------------------------------------------------
+from elasticsearch import Elasticsearch, helpers
+import cohere
+import json
+import requests
+------------------------------------------------------------
+
+[discrete]
+[[cohere-es-client]]
+==== Create the {es} client
+
+To create your {es} client, you need:
+
+* https://www.elastic.co/search-labs/tutorials/install-elasticsearch/elastic-cloud#finding-your-cloud-id[your Cloud ID],
+* https://www.elastic.co/search-labs/tutorials/install-elasticsearch/elastic-cloud#creating-an-api-key[an encoded API key].
+
+[source,py]
+------------------------------------------------------------
+ELASTIC_CLOUD_ID = "elastic_cloud_id"
+ELASTIC_API_KEY = "elastic_api_key"
+
+client = Elasticsearch(
+ cloud_id=ELASTIC_CLOUD_ID,
+ api_key=ELASTIC_API_KEY
+)
+
+# Confirm the client has connected
+print(client.info())
+------------------------------------------------------------
+
+
+[discrete]
+[[cohere-es-infer-endpoint]]
+==== Create the {infer} endpoint
+
+<> first. In this example, the
+{infer} endpoint uses Cohere's `embed-english-v3.0` model and the
+`embedding_type` is set to `byte`.
+
+[source,py]
+------------------------------------------------------------
+COHERE_API_KEY = "cohere_api_key"
+
+client.inference.put_model(
+ task_type="text_embedding",
+ inference_id="cohere_embeddings",
+ body={
+ "service": "cohere",
+ "service_settings": {
+ "api_key": COHERE_API_KEY,
+ "model_id": "embed-english-v3.0",
+ "embedding_type": "byte"
+ }
+ },
+)
+------------------------------------------------------------
+
+You can find your API keys in your Cohere dashboard under the
+https://dashboard.cohere.com/api-keys[API keys section].
+
+
+[discrete]
+[[cohere-es-index-mapping]]
+==== Create the index mapping
+
+Create the index mapping for the index that will contain the embeddings.
+
+[source,py]
+------------------------------------------------------------
+client.indices.create(
+ index="cohere-embeddings",
+ settings={"index": {"default_pipeline": "cohere_embeddings"}},
+ mappings={
+ "properties": {
+ "text_embedding": {
+ "type": "dense_vector",
+ "dims": 1024,
+ "element_type": "byte",
+ },
+ "text": {"type": "text"},
+ "id": {"type": "integer"},
+ "title": {"type": "text"}
+ }
+ },
+)
+------------------------------------------------------------
+
+
+[discrete]
+[[cohere-es-infer-pipeline]]
+==== Create the {infer} pipeline
+
+Now you have an {infer} endpoint and an index ready to store embeddings. The
+next step is to create an <> with an
+<> that will create the embeddings using
+the {infer} endpoint and store them in the index.
+
+[source,py]
+--------------------------------------------------
+client.ingest.put_pipeline(
+ id="cohere_embeddings",
+ description="Ingest pipeline for Cohere inference.",
+ processors=[
+ {
+ "inference": {
+ "model_id": "cohere_embeddings",
+ "input_output": {
+ "input_field": "text",
+ "output_field": "text_embedding",
+ },
+ }
+ }
+ ],
+)
+--------------------------------------------------
+
+
+[discrete]
+[[cohere-es-insert-documents]]
+==== Prepare data and insert documents
+
+This example uses the https://huggingface.co/datasets/mteb/scifact[SciFact] data
+set that you can find on HuggingFace.
+
+[source,py]
+--------------------------------------------------
+url = 'https://huggingface.co/datasets/mteb/scifact/raw/main/corpus.jsonl'
+
+# Fetch the JSONL data from the URL
+response = requests.get(url)
+response.raise_for_status()  # Raise an exception for bad HTTP responses
+
+# Split the content by new lines and parse each line as JSON
+data = [json.loads(line) for line in response.text.strip().split('\n') if line]
+# Now data is a list of dictionaries
+
+# Change `_id` key to `id` as `_id` is a reserved key in Elasticsearch.
+for item in data:
+ if '_id' in item:
+ item['id'] = item.pop('_id')
+
+# Prepare the documents to be indexed
+documents = []
+for line in data:
+ data_dict = line
+ documents.append({
+ "_index": "cohere-embeddings",
+ "_source": data_dict,
+ }
+ )
+
+# Use the bulk endpoint to index
+helpers.bulk(client, documents)
+
+print("Data ingestion completed, text embeddings generated!")
+--------------------------------------------------
+
+Your index is populated with the SciFact data and text embeddings for the text
+field.
+
+
+[discrete]
+[[cohere-es-hybrid-search]]
+==== Hybrid search
+
+Let's start querying the index!
+
+The code below performs a hybrid search. The `kNN` query computes the relevance
+of search results based on vector similarity using the `text_embedding` field,
+while the lexical search query uses BM25 retrieval to compute keyword similarity on
+the `title` and `text` fields.
+
+[source,py]
+--------------------------------------------------
+query = "What is biosimilarity?"
+
+response = client.search(
+ index="cohere-embeddings",
+ size=100,
+ knn={
+ "field": "text_embedding",
+ "query_vector_builder": {
+ "text_embedding": {
+ "model_id": "cohere_embeddings",
+ "model_text": query,
+ }
+ },
+ "k": 10,
+ "num_candidates": 50,
+ },
+ query={
+ "multi_match": {
+ "query": query,
+ "fields": ["text", "title"]
+ }
+ }
+)
+
+raw_documents = response["hits"]["hits"]
+
+# Display the first 10 results
+for document in raw_documents[0:10]:
+ print(f'Title: {document["_source"]["title"]}\nText: {document["_source"]["text"]}\n')
+
+# Format the documents for ranking
+documents = []
+for hit in response["hits"]["hits"]:
+ documents.append(hit["_source"]["text"])
+--------------------------------------------------
+
+
+[discrete]
+[[cohere-es-rerank-results]]
+===== Rerank search results
+
+To combine the results more effectively, use
+https://docs.cohere.com/docs/rerank-2[Cohere's Rerank v3] model through the
+{infer} API to provide a more precise semantic reranking of the results.
+
+Create an {infer} endpoint with your Cohere API key and the name of the model
+to use as the `model_id` (`rerank-english-v3.0` in this example).
+
+[source,py]
+--------------------------------------------------
+client.inference.put_model(
+ task_type="rerank",
+ inference_id="cohere_rerank",
+ body={
+ "service": "cohere",
+ "service_settings":{
+ "api_key": COHERE_API_KEY,
+ "model_id": "rerank-english-v3.0"
+ },
+ "task_settings": {
+ "top_n": 10,
+ },
+ }
+)
+--------------------------------------------------
+
+Rerank the results using the new {infer} endpoint.
+
+[source,py]
+--------------------------------------------------
+# Pass the query and the search results to the service
+response = client.inference.inference(
+ inference_id="cohere_rerank",
+ body={
+ "query": query,
+ "input": documents,
+ "task_settings": {
+ "return_documents": False
+ }
+ }
+)
+
+# Reconstruct the input documents based on the index provided in the rerank response
+ranked_documents = []
+for document in response.body["rerank"]:
+ ranked_documents.append({
+ "title": raw_documents[int(document["index"])]["_source"]["title"],
+ "text": raw_documents[int(document["index"])]["_source"]["text"]
+ })
+
+# Print the top 10 results
+for document in ranked_documents[0:10]:
+ print(f"Title: {document['title']}\nText: {document['text']}\n")
+--------------------------------------------------
+
+The response is a list of documents in descending order of relevance. Each
+document has a corresponding index that reflects the order of the documents when
+they were sent to the {infer} endpoint.
+
+
+[discrete]
+[[cohere-es-rag]]
+==== Retrieval Augmented Generation (RAG) with Cohere and {es}
+
+RAG is a method for generating text using additional information fetched from an
+external data source. With the ranked results, you can build a RAG system on the
+top of what you previously created by using
+https://docs.cohere.com/docs/chat-api[Cohere's Chat API].
+
+Pass in the retrieved documents and the query to receive a grounded response
+using Cohere's newest generative model
+https://docs.cohere.com/docs/command-r-plus[Command R+].
+
+Then pass in the query and the documents to the Chat API, and print out the
+response.
+
+[source,py]
+--------------------------------------------------
+# Create the Cohere client (the `cohere` package was imported earlier)
+co = cohere.Client(COHERE_API_KEY)
+
+response = co.chat(message=query, documents=ranked_documents, model='command-r-plus')
+
+source_documents = []
+for citation in response.citations:
+ for document_id in citation.document_ids:
+ if document_id not in source_documents:
+ source_documents.append(document_id)
+
+print(f"Query: {query}")
+print(f"Response: {response.text}")
+print("Sources:")
+for document in response.documents:
+ if document['id'] in source_documents:
+ print(f"{document['title']}: {document['text']}")
+
+--------------------------------------------------
+
+The response will look similar to this:
+
+[source,console-result]
+--------------------------------------------------
+Query: What is biosimilarity?
+Response: Biosimilarity is based on the comparability concept, which has been used successfully for several decades to ensure close similarity of a biological product before and after a manufacturing change. Over the last 10 years, experience with biosimilars has shown that even complex biotechnology-derived proteins can be copied successfully.
+Sources:
+Interchangeability of Biosimilars: A European Perspective: (...)
+--------------------------------------------------
+// NOTCONSOLE
diff --git a/docs/reference/search/search-your-data/retrievers-overview.asciidoc b/docs/reference/search/search-your-data/retrievers-overview.asciidoc
new file mode 100644
index 0000000000000..fdd984819558b
--- /dev/null
+++ b/docs/reference/search/search-your-data/retrievers-overview.asciidoc
@@ -0,0 +1,207 @@
+[[retrievers-overview]]
+== Retrievers
+
+// Will move to a top level "Retrievers and reranking" section once reranking is live
+
+preview::[]
+
+A retriever is an abstraction that was added to the Search API in *8.14.0*.
+This abstraction enables the configuration of multi-stage retrieval
+pipelines within a single `_search` call. This simplifies your search
+application logic, because you no longer need to configure complex searches via
+multiple {es} calls or implement additional client-side logic to
+combine results from different queries.
+
+This document provides a general overview of the retriever abstraction.
+For implementation details, including notable restrictions, check out the
+<> in the `_search` API docs.
+
+[discrete]
+[[retrievers-overview-types]]
+=== Retriever types
+
+Retrievers come in various types, each tailored for different search operations.
+The following retrievers are currently available:
+
+* <>. Returns top documents from a
+traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query].
+Mimics a traditional query but in the context of a retriever framework. This
+ensures backward compatibility as existing `_search` requests remain supported.
+That way you can transition to the new abstraction at your own pace without
+mixing syntaxes.
+* <>. Returns top documents from a <>,
+in the context of a retriever framework.
+* <>. Combines and ranks multiple first-stage retrievers using
+the reciprocal rank fusion (RRF) algorithm. Allows you to combine multiple result sets
+with different relevance indicators into a single result set.
+An RRF retriever is a *compound retriever*, where its `filter` element is
+propagated to its sub retrievers.
++
+Sub retrievers may not use elements that are restricted when a compound
+retriever is part of the retriever tree.
+See the <> for detailed
+examples and information on how to use the RRF retriever.
+
+[NOTE]
+====
+Stay tuned for more retriever types in future releases!
+====
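+
+As a concrete illustration, a standalone retriever is passed via the
+`retriever` section of a `_search` request. The following minimal sketch uses
+the Python {es} client (assuming a client version that supports the
+`retriever` search option, 8.14 or later); the index name, field name, and
+query vector are hypothetical:
+
+[source,py]
+----
+from elasticsearch import Elasticsearch
+
+client = Elasticsearch("http://localhost:9200")  # assumed connection details
+
+resp = client.search(
+    index="example-index",  # hypothetical index
+    retriever={
+        "knn": {
+            "field": "vector",                    # hypothetical dense_vector field
+            "query_vector": [0.12, -0.45, 0.91],  # hypothetical embedding
+            "k": 10,
+            "num_candidates": 50,
+        }
+    },
+)
+print(resp["hits"]["hits"])
+----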
+
+[discrete]
+=== What makes retrievers useful?
+
+Here's an overview of what makes retrievers useful and how they differ from
+regular queries.
+
+. *Simplified user experience*. Retrievers simplify the user experience by
+allowing entire retrieval pipelines to be configured in a single API call. This
+maintains backward compatibility with traditional query elements by
+automatically translating them to the appropriate retriever.
+. *Structured retrieval*. Retrievers provide a more structured way to define search
+operations. They allow searches to be described using a "retriever tree", a
+hierarchical structure that clarifies the sequence and logic of operations,
+making complex searches more understandable and manageable.
+. *Composability and flexibility*. Retrievers enable flexible composability,
+allowing you to build pipelines and seamlessly integrate different retrieval
+strategies into these pipelines. Retrievers make it easy to test out different
+retrieval strategy combinations.
+. *Compound operations*. A retriever can have sub retrievers. This
+allows complex nested searches where the results of one retriever feed into
+another, supporting sophisticated querying strategies that might involve
+multiple stages or criteria.
+. *Retrieval as a first-class concept*. Unlike
+traditional queries, where the query is a part of a larger search API call,
+retrievers are designed as standalone entities that can be combined or used in
+isolation. This enables a more modular and flexible approach to constructing
+searches.
+. *Enhanced control over document scoring and ranking*. Retrievers
+allow for more explicit control over how documents are scored and filtered. For
+instance, you can specify minimum score thresholds, apply complex filters
+without affecting scoring, and use parameters like `terminate_after` for
+performance optimizations.
+. *Integration with existing {es} functionalities*. Even though
+retrievers can be used instead of existing `_search` API syntax (like the
+`query` and `knn`), they are designed to integrate seamlessly with things like
+pagination (`search_after`) and sorting. They also maintain compatibility with
+aggregation operations by treating the combination of all leaf retrievers as
+`should` clauses in a boolean query.
+. *Cleaner separation of concerns*. When using compound retrievers, only the
+query element is allowed, which enforces a cleaner separation of concerns
+and prevents the complexity that might arise from overly nested or
+interdependent configurations.
+
+[discrete]
+[[retrievers-overview-example]]
+=== Example
+
+The following example demonstrates how using retrievers
+simplifies the composability of queries for RRF ranking.
+
+[source,js]
+----
+GET example-index/_search
+{
+ "retriever": {
+ "rrf": {
+ "retrievers": [
+ {
+ "standard": {
+ "query": {
+ "text_expansion": {
+ "vector.tokens": {
+ "model_id": ".elser_model_2",
+ "model_text": "What blue shoes are on sale?"
+ }
+ }
+ }
+ }
+ },
+ {
+ "standard": {
+ "query": {
+ "match": {
+ "text": "blue shoes sale"
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+}
+----
+//NOTCONSOLE
+
+This example demonstrates how you can combine different
+retrieval strategies into a single `retriever` pipeline.
+
+Compare this to the equivalent `RRF` with `sub_searches` approach:
+
+.*Expand* for example
+[%collapsible]
+==============
+
+[source,js]
+----
+GET example-index/_search
+{
+ "sub_searches":[
+ {
+ "query":{
+ "match":{
+ "text":"blue shoes sale"
+ }
+ }
+ },
+ {
+ "query":{
+ "text_expansion":{
+ "vector.tokens":{
+ "model_id":".elser_model_2",
+ "model_text":"What blue shoes are on sale?"
+ }
+ }
+ }
+ }
+ ],
+ "rank":{
+ "rrf":{
+ "window_size":50,
+ "rank_constant":20
+ }
+ }
+}
+----
+//NOTCONSOLE
+==============
+
+[discrete]
+[[retrievers-overview-glossary]]
+=== Glossary
+
+Here are some important terms:
+
+* *Retrieval Pipeline*. Defines the entire retrieval and ranking logic to
+produce top hits.
+* *Retriever Tree*. A hierarchical structure that defines how retrievers interact.
+* *First-stage Retriever*. Returns an initial set of candidate documents.
+* *Compound Retriever*. Builds on one or more retrievers,
+enhancing document retrieval and ranking logic.
+* *Combiners*. Compound retrievers that merge top hits
+from multiple sub retrievers.
+//* NOT YET *Rerankers*. Special compound retrievers that reorder hits and may adjust the number of hits, with distinctions between first-stage and second-stage rerankers.
+
+[discrete]
+[[retrievers-overview-play-in-search]]
+=== Retrievers in action
+
+The Search Playground builds Elasticsearch queries using the retriever abstraction.
+It automatically detects the fields and types in your index and builds a retriever tree based on your selections.
+
+You can use the Playground to experiment with different retriever configurations and see how they affect search results.
+
+Refer to the {kibana-ref}/playground.html[Playground documentation] for more information.
+// Content coming in https://github.com/elastic/kibana/pull/182692
+
+
+
diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc
index bed204985296c..e1c1618410f2f 100644
--- a/docs/reference/search/search-your-data/search-your-data.asciidoc
+++ b/docs/reference/search/search-your-data/search-your-data.asciidoc
@@ -43,10 +43,11 @@ DSL, with a simplified user experience. Create search applications based on your
results directly in the Kibana Search UI.
include::search-api.asciidoc[]
-include::search-application-overview.asciidoc[]
include::knn-search.asciidoc[]
include::semantic-search.asciidoc[]
+include::retrievers-overview.asciidoc[]
include::learning-to-rank.asciidoc[]
include::search-across-clusters.asciidoc[]
include::search-with-synonyms.asciidoc[]
+include::search-application-overview.asciidoc[]
include::behavioral-analytics/behavioral-analytics-overview.asciidoc[]
diff --git a/docs/reference/search/search-your-data/semantic-search.asciidoc b/docs/reference/search/search-your-data/semantic-search.asciidoc
index a4d892c98645b..a1197e7bbbd3a 100644
--- a/docs/reference/search/search-your-data/semantic-search.asciidoc
+++ b/docs/reference/search/search-your-data/semantic-search.asciidoc
@@ -136,3 +136,4 @@ include::{es-ref-dir}/tab-widgets/semantic-search/hybrid-search-widget.asciidoc[
include::semantic-search-elser.asciidoc[]
include::semantic-search-inference.asciidoc[]
+include::cohere-es.asciidoc[]
diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc
index 4a56961246c2b..794496c8b24ad 100644
--- a/docs/reference/searchable-snapshots/index.asciidoc
+++ b/docs/reference/searchable-snapshots/index.asciidoc
@@ -310,9 +310,9 @@ of {search-snap} indices.
The sole copy of the data in a {search-snap} index is the underlying snapshot,
stored in the repository. For example:
-* You cannot unregister a repository while any of the searchable snapshots it
-contains are mounted in {es}. You also cannot delete a snapshot if any of its
-indices are mounted as a searchable snapshot in the same cluster.
+* You must not unregister a repository while any of the searchable snapshots it
+contains are mounted in {es}. You also must not delete a snapshot if any of its
+indices are mounted as searchable snapshots.
* If you mount indices from snapshots held in a repository to which a different
cluster has write access then you must make sure that the other cluster does not
diff --git a/docs/reference/settings/inference-settings.asciidoc b/docs/reference/settings/inference-settings.asciidoc
new file mode 100644
index 0000000000000..fa0905cf0ef73
--- /dev/null
+++ b/docs/reference/settings/inference-settings.asciidoc
@@ -0,0 +1,92 @@
+
+[role="xpack"]
+[[inference-settings]]
+=== Inference API settings in {es}
+++++
+Inference settings
+++++
+
+[[inference-settings-description]]
+// tag::inference-settings-description-tag[]
+You do not need to configure any settings to use the {infer} APIs. Each setting has a default.
+// end::inference-settings-description-tag[]
+
+[discrete]
+[[xpack-inference-logging]]
+// tag::inference-logging[]
+==== Inference API logging settings
+
+When certain failures occur, a log message is emitted. In the case of a
+recurring failure, the logging throttler prevents the same message from being logged repeatedly.
+
+`xpack.inference.logging.reset_interval`::
+(<>) Specifies the interval at which a cleanup thread clears the internal
+cache of previously logged messages. Defaults to one day (`1d`).
+
+`xpack.inference.logging.wait_duration`::
+(<<static-cluster-setting,Static>>) Specifies the amount of time to wait after logging a message before that
+message can be logged again. Defaults to one hour (`1h`).
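+
+For example, to wait longer before a throttled message can repeat, you could set the following in `elasticsearch.yml` (values here are illustrative, not recommendations):
+
+[source,yaml]
+----
+xpack.inference.logging.reset_interval: 1d
+xpack.inference.logging.wait_duration: 2h
+----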
+// end::inference-logging[]
+
+[[xpack-inference-http-settings]]
+// tag::inference-http-settings[]
+==== {infer-cap} API HTTP settings
+
+`xpack.inference.http.max_response_size`::
+(<<static-cluster-setting,Static>>) Specifies the maximum size in bytes an HTTP response is allowed to have.
+Defaults to `10mb`; the maximum configurable value is `50mb`.
+
+`xpack.inference.http.max_total_connections`::
+(<<static-cluster-setting,Static>>) Specifies the maximum number of connections the internal connection pool can
+lease. Defaults to `50`.
+
+`xpack.inference.http.max_route_connections`::
+(<<static-cluster-setting,Static>>) Specifies the maximum number of connections a single route can lease from
+the internal connection pool. If this setting is set to a value equal to or greater than
+`xpack.inference.http.max_total_connections`, then a single third-party service could lease all available
+connections and other third-party services would be unable to lease connections. Defaults to `20`.
+
+`xpack.inference.http.connection_eviction_interval`::
+(<<static-cluster-setting,Static>>) Specifies the interval at which an eviction thread runs to remove expired and
+stale connections from the internal connection pool. Decreasing this time value can help improve throughput if
+multiple third-party services are contending for the available connections in the pool. Defaults to one minute (`1m`).
+
+`xpack.inference.http.connection_eviction_max_idle_time`::
+(<<static-cluster-setting,Static>>) Specifies the maximum duration a connection can be unused before it is marked as
+idle and can be closed and removed from the shared connection pool. Defaults to one minute (`1m`).
+
+`xpack.inference.http.request_executor.queue_capacity`::
+(<<static-cluster-setting,Static>>) Specifies the size of the internal queue for requests waiting to be sent. If
+the queue is full and a request is sent to the {infer} API, it will be rejected. Defaults to `2000`.
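+
+As an illustration, a deployment that sends many concurrent {infer} requests to a single service might raise the pool limits in `elasticsearch.yml` (illustrative values, not recommendations):
+
+[source,yaml]
+----
+xpack.inference.http.max_total_connections: 100
+xpack.inference.http.max_route_connections: 40
+xpack.inference.http.request_executor.queue_capacity: 5000
+----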
+
+[[xpack-inference-http-retry-settings]]
+==== {infer-cap} API HTTP retry settings
+
+When a third-party service returns a transient failure code (for example, 429), the request is retried by the {infer}
+API. These settings govern the retry behavior. When a request is retried, exponential backoff is used.
+
+`xpack.inference.http.retry.initial_delay`::
+(<<static-cluster-setting,Static>>) Specifies the initial delay before retrying a request. Defaults to one second
+(`1s`).
+
+`xpack.inference.http.retry.max_delay_bound`::
+(<<static-cluster-setting,Static>>) Specifies the maximum delay between retries of a request. Defaults to five seconds (`5s`).
+
+`xpack.inference.http.retry.timeout`::
+(<<static-cluster-setting,Static>>) Specifies the maximum amount of time a request can be retried.
+Once the request exceeds this time, the request will no longer be retried and a failure will be returned.
+Defaults to 30 seconds (`30s`).
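+
+For example, to widen the exponential backoff window for a frequently rate-limited service (illustrative values):
+
+[source,yaml]
+----
+xpack.inference.http.retry.initial_delay: 2s
+xpack.inference.http.retry.max_delay_bound: 10s
+xpack.inference.http.retry.timeout: 60s
+----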
+// end::inference-http-settings[]
+
+[[xpack-inference-input-text]]
+// tag::inference-input-text[]
+==== {infer-cap} API input text
+
+For certain third-party service integrations, when the service returns an error indicating that the request
+input was too large, the input is truncated and the request is retried. These settings govern
+how the truncation is performed.
+
+`xpack.inference.truncator.reduction_percentage`::
+(<<static-cluster-setting,Static>>) Specifies the percentage by which to reduce the input text if the third-party service
+responds with an error indicating that it is too long. Defaults to 50 percent (`0.5`), so a rejected input is retried at roughly half its original length.
+// end::inference-input-text[]
diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc
index e007b67a943b0..64626aafb2441 100644
--- a/docs/reference/setup.asciidoc
+++ b/docs/reference/setup.asciidoc
@@ -29,8 +29,6 @@ resource-heavy {ls} deployment should be on its own host.
include::setup/install.asciidoc[]
-include::setup/run-elasticsearch-locally.asciidoc[]
-
include::setup/configuration.asciidoc[]
include::setup/important-settings.asciidoc[]
@@ -70,6 +68,8 @@ include::setup/logging-config.asciidoc[]
include::settings/ml-settings.asciidoc[]
+include::settings/inference-settings.asciidoc[]
+
include::settings/monitoring-settings.asciidoc[]
include::modules/node.asciidoc[]
diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc
index 49501c46b8ba9..89373d0ce8d44 100644
--- a/docs/reference/setup/install.asciidoc
+++ b/docs/reference/setup/install.asciidoc
@@ -20,7 +20,7 @@ If you want to install and manage {es} yourself, you can:
* Run {es} in a <<docker,Docker container>>.
* Set up and manage {es}, {kib}, {agent}, and the rest of the Elastic Stack on Kubernetes with {eck-ref}[{eck}].
-TIP: To try out Elasticsearch on your own machine, we recommend using Docker and running both Elasticsearch and Kibana. For more information, see <<run-elasticsearch-locally>>.
+TIP: To try out Elasticsearch on your own machine, we recommend using Docker and running both Elasticsearch and Kibana. For more information, see <<run-elasticsearch-locally>>. Please note that this setup is *not suitable for production use*.
[discrete]
[[elasticsearch-install-packages]]
diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc
index 0c518d520bdd5..370fc5c4ccf7e 100644
--- a/docs/reference/setup/install/docker.asciidoc
+++ b/docs/reference/setup/install/docker.asciidoc
@@ -8,6 +8,12 @@ https://github.com/elastic/elasticsearch/blob/{branch}/distribution/docker[GitHu
include::license.asciidoc[]
+[TIP]
+====
+If you just want to test {es} in local development, refer to <<run-elasticsearch-locally>>.
+Please note that this setup is not suitable for production environments.
+====
+
[[docker-cli-run-dev-mode]]
==== Run {es} in Docker
diff --git a/docs/reference/setup/run-elasticsearch-locally.asciidoc b/docs/reference/setup/run-elasticsearch-locally.asciidoc
deleted file mode 100644
index a6e6d5c8963a2..0000000000000
--- a/docs/reference/setup/run-elasticsearch-locally.asciidoc
+++ /dev/null
@@ -1,183 +0,0 @@
-[[run-elasticsearch-locally]]
-== Run Elasticsearch locally
-
-////
-IMPORTANT: This content is replicated in the Elasticsearch repo
-README.ascidoc file. If you make changes, you must also update the
-Elasticsearch README.
-+
-GitHub renders the tagged region directives when you view the README,
-so it's not possible to just include the content from the README. Darn.
-+
-Also note that there are similar instructions in the Kibana guide:
-https://www.elastic.co/guide/en/kibana/current/docker.html
-////
-
-To try out Elasticsearch on your own machine, we recommend using Docker
-and running both Elasticsearch and Kibana.
-Docker images are available from the https://www.docker.elastic.co[Elastic Docker registry].
-
-NOTE: Starting in Elasticsearch 8.0, security is enabled by default.
-The first time you start Elasticsearch, TLS encryption is configured automatically,
-a password is generated for the `elastic` user,
-and a Kibana enrollment token is created so you can connect Kibana to your secured cluster.
-
-For other installation options, see the
-https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html[Elasticsearch installation documentation].
-
-[discrete]
-=== Start Elasticsearch
-
-. Install and start https://www.docker.com/products/docker-desktop[Docker
-Desktop]. Go to **Preferences > Resources > Advanced** and set Memory to at least 4GB.
-
-. Start an Elasticsearch container:
-ifeval::["{release-state}"=="unreleased"]
-+
-WARNING: Version {version} of {es} has not yet been released, so no
-Docker image is currently available for this version.
-endif::[]
-+
-[source,sh,subs="attributes"]
-----
-docker network create elastic
-docker pull docker.elastic.co/elasticsearch/elasticsearch:{version}
-docker run --name elasticsearch --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t docker.elastic.co/elasticsearch/elasticsearch:{version}
-----
-+
-When you start Elasticsearch for the first time, the generated `elastic` user password and
-Kibana enrollment token are output to the terminal.
-+
-NOTE: You might need to scroll back a bit in the terminal to view the password
-and enrollment token.
-
-. Copy the generated password and enrollment token and save them in a secure
-location. These values are shown only when you start Elasticsearch for the first time.
-You'll use these to enroll Kibana with your Elasticsearch cluster and log in.
-
-[discrete]
-=== Start Kibana
-
-Kibana enables you to easily send requests to Elasticsearch and analyze, visualize, and manage data interactively.
-
-. In a new terminal session, start Kibana and connect it to your Elasticsearch container:
-ifeval::["{release-state}"=="unreleased"]
-+
-WARNING: Version {version} of {kib} has not yet been released, so no
-Docker image is currently available for this version.
-endif::[]
-+
-[source,sh,subs="attributes"]
-----
-docker pull docker.elastic.co/kibana/kibana:{version}
-docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version}
-----
-+
-When you start Kibana, a unique URL is output to your terminal.
-
-. To access Kibana, open the generated URL in your browser.
-
- .. Paste the enrollment token that you copied when starting
- Elasticsearch and click the button to connect your Kibana instance with Elasticsearch.
-
- .. Log in to Kibana as the `elastic` user with the password that was generated
- when you started Elasticsearch.
-
-[discrete]
-=== Send requests to Elasticsearch
-
-You send data and other requests to Elasticsearch through REST APIs.
-You can interact with Elasticsearch using any client that sends HTTP requests,
-such as the https://www.elastic.co/guide/en/elasticsearch/client/index.html[Elasticsearch
-language clients] and https://curl.se[curl].
-Kibana's developer console provides an easy way to experiment and test requests.
-To access the console, go to **Management > Dev Tools**.
-
-[discrete]
-=== Add data
-
-You index data into Elasticsearch by sending JSON objects (documents) through the REST APIs.
-Whether you have structured or unstructured text, numerical data, or geospatial data,
-Elasticsearch efficiently stores and indexes it in a way that supports fast searches.
-
-For timestamped data such as logs and metrics, you typically add documents to a
-data stream made up of multiple auto-generated backing indices.
-
-To add a single document to an index, submit an HTTP post request that targets the index.
-
-[source,console]
-----
-POST /customer/_doc/1
-{
- "firstname": "Jennifer",
- "lastname": "Walters"
-}
-----
-
-This request automatically creates the `customer` index if it doesn't exist,
-adds a new document that has an ID of 1, and
-stores and indexes the `firstname` and `lastname` fields.
-
-The new document is available immediately from any node in the cluster.
-You can retrieve it with a GET request that specifies its document ID:
-
-[source,console]
-----
-GET /customer/_doc/1
-----
-// TEST[continued]
-
-To add multiple documents in one request, use the `_bulk` API.
-Bulk data must be newline-delimited JSON (NDJSON).
-Each line must end in a newline character (`\n`), including the last line.
-
-[source,console]
-----
-PUT customer/_bulk
-{ "create": { } }
-{ "firstname": "Monica","lastname":"Rambeau"}
-{ "create": { } }
-{ "firstname": "Carol","lastname":"Danvers"}
-{ "create": { } }
-{ "firstname": "Wanda","lastname":"Maximoff"}
-{ "create": { } }
-{ "firstname": "Jennifer","lastname":"Takeda"}
-----
-// TEST[continued]
-
-[discrete]
-=== Search
-
-Indexed documents are available for search in near real-time.
-The following search matches all customers with a first name of _Jennifer_
-in the `customer` index.
-
-[source,console]
-----
-GET customer/_search
-{
- "query" : {
- "match" : { "firstname": "Jennifer" }
- }
-}
-----
-// TEST[continued]
-
-[discrete]
-=== Explore
-
-You can use Discover in Kibana to interactively search and filter your data.
-From there, you can start creating visualizations and building and sharing dashboards.
-
-To get started, create a _data view_ that connects to one or more Elasticsearch indices,
-data streams, or index aliases.
-
-. Go to **Management > Stack Management > Kibana > Data Views**.
-. Select **Create data view**.
-. Enter a name for the data view and a pattern that matches one or more indices,
-such as _customer_.
-. Select **Save data view to Kibana**.
-
-To start exploring, go to **Analytics > Discover**.
-
-
diff --git a/docs/reference/tab-widgets/api-call-widget.asciidoc b/docs/reference/tab-widgets/api-call-widget.asciidoc
index adc2aa86f1c0e..4ad3c45366434 100644
--- a/docs/reference/tab-widgets/api-call-widget.asciidoc
+++ b/docs/reference/tab-widgets/api-call-widget.asciidoc
@@ -12,7 +12,7 @@
aria-controls="self-managed-tab-api-call"
id="self-managed-api-call"
tabindex="-1">
- Self-managed
+ Local Dev (Docker)
- Elasticsearch Service
+ Elastic Cloud
> for advanced Docker documentation.
-
-. Run the following Docker commands:
-+
-[source,sh,subs="attributes"]
-----
-docker network create elastic
-docker pull {docker-image}
-docker run --name es01 --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t {docker-image}
-----
-
-. Copy the generated `elastic` password and enrollment token, which are output to your terminal.
-You'll use these to enroll {kib} with your {es} cluster and log in.
-These credentials are only shown when you start {es} for the first time.
-+
-We recommend storing the `elastic` password as an environment variable in your shell. Example:
-+
-[source,sh]
-----
-export ELASTIC_PASSWORD="your_password"
-----
-+
-. Copy the `http_ca.crt` SSL certificate from the container to your local machine.
-+
-[source,sh]
-----
-docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt .
-----
-+
-. Make a REST API call to {es} to ensure the {es} container is running.
-+
-[source,sh]
-----
-curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200
-----
-// NOTCONSOLE
-
-*Run {kib}*
-
-{kib} is the user interface for Elastic.
-It's great for getting started with {es} and exploring your data.
-We'll be using the Dev Tools *Console* in {kib} to make REST API calls to {es}.
-
-In a new terminal session, start {kib} and connect it to your {es} container:
-
-[source,sh,subs="attributes"]
-----
-docker pull {kib-docker-image}
-docker run --name kibana --net elastic -p 5601:5601 {kib-docker-image}
-----
-
-When you start {kib}, a unique URL is output to your terminal.
-To access {kib}:
-
-. Open the generated URL in your browser.
-. Paste the enrollment token that you copied earlier, to connect your {kib} instance with {es}.
-. Log in to {kib} as the `elastic` user with the password that was generated when you started {es}.
+Refer to our <<run-elasticsearch-locally,local development setup>> to quickly spin up a local development environment in Docker. If you don't need {kib}, you'll only need one `docker run` command to start {es}. Please note that this setup is *not suitable for production use*.
// end::self-managed[]
\ No newline at end of file
diff --git a/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc b/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc
index 47403df450bd2..93edc0918614d 100644
--- a/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc
+++ b/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc
@@ -1,7 +1,7 @@
// tag::elser[]
Hybrid search between a semantic and lexical query can be achieved by using an
-<<rrf>> as part of your search request. Provide a
+<<rrf-retriever,`rrf` retriever>> as part of your search request. Provide a
`text_expansion` query and a full-text query as
<<retrievers-overview,retrievers>> for the `rrf` retriever. The `rrf`
retriever uses <<rrf,reciprocal rank fusion>> to rank the top documents.
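+
+A minimal sketch of such a request follows; the index, field, and model names are
+placeholders, not part of the original documentation:
+
+[source,console]
+----
+GET my-index/_search
+{
+  "retriever": {
+    "rrf": {
+      "retrievers": [
+        {
+          "standard": {
+            "query": {
+              "match": { "my_text_field": "wireless headphones" }
+            }
+          }
+        },
+        {
+          "standard": {
+            "query": {
+              "text_expansion": {
+                "my_tokens_field": {
+                  "model_id": ".elser_model_2",
+                  "model_text": "wireless headphones"
+                }
+              }
+            }
+          }
+        }
+      ]
+    }
+  }
+}
+----
+// TEST[skip:requires a deployed ELSER model]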
diff --git a/docs/reference/transform/images/transform-rule.png b/docs/reference/transform/images/transform-rule.png
deleted file mode 100644
index c43dd6c1be929..0000000000000
Binary files a/docs/reference/transform/images/transform-rule.png and /dev/null differ
diff --git a/docs/reference/transform/transform-alerts.asciidoc b/docs/reference/transform/transform-alerts.asciidoc
index e3ea82d34ec2e..988dc5effe956 100644
--- a/docs/reference/transform/transform-alerts.asciidoc
+++ b/docs/reference/transform/transform-alerts.asciidoc
@@ -18,19 +18,20 @@ refer to
You can create {transform} rules under **{stack-manage-app} > {rules-ui}**.
-. On the *Create rule* window, give a name to the rule and optionally provide
-tags. Select the {transform} health rule type:
+. Click *Create rule* and select the {transform} health rule type.
+
+. Give a name to the rule and optionally provide tags.
+
+. Select the {transform} or {transforms} to include. You can also use a special
+character (`*`) to apply the rule to all your {transforms}. {transforms-cap}
+created after the rule are automatically included.
+
--
[role="screenshot"]
-image::images/transform-rule.png["Creating a transform health rule",500]
+image::images/transform-check-config.png["Selecting health check",500]
// NOTE: This is screenshot is automatically generated. Do not edit it directly.
--
-. Select the {transform} or {transforms} to include. You can also use a special
-character (`*`) to apply the rule to all your {transforms}. {transforms-cap}
-created after the rule are automatically included.
-
. The following health checks are available and enabled by default:
+
--
@@ -41,10 +42,6 @@ _{transform-cap} is not started_::
_Unhealthy {transform}_::
Get alerts when a {transform} has an unhealthy status.
The notification message contains status details and related issues.
-
-[role="screenshot"]
-image::images/transform-check-config.png["Selecting health check",500]
-// NOTE: This is screenshot is automatically generated. Do not edit it directly.
--
. Set the check interval, which defines how often to evaluate the rule conditions.
diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc
index 01ef39b69c529..ceff8619062c4 100644
--- a/docs/reference/troubleshooting.asciidoc
+++ b/docs/reference/troubleshooting.asciidoc
@@ -138,3 +138,5 @@ include::troubleshooting/troubleshooting-searches.asciidoc[]
include::troubleshooting/troubleshooting-shards-capacity.asciidoc[]
include::troubleshooting/troubleshooting-unbalanced-cluster.asciidoc[]
+
+include::troubleshooting/diagnostic.asciidoc[]
diff --git a/docs/reference/troubleshooting/diagnostic.asciidoc b/docs/reference/troubleshooting/diagnostic.asciidoc
new file mode 100644
index 0000000000000..a944ca88d285d
--- /dev/null
+++ b/docs/reference/troubleshooting/diagnostic.asciidoc
@@ -0,0 +1,152 @@
+[[diagnostic]]
+== Capturing diagnostics
+++++
+Capture diagnostics
+++++
+:keywords: Elasticsearch diagnostic, diagnostics
+
+The {es} https://github.com/elastic/support-diagnostics[Support Diagnostic] tool captures a point-in-time snapshot of cluster statistics and most settings.
+It works against all {es} versions.
+
+This information can be used to troubleshoot problems with your cluster. For examples of issues that you can troubleshoot using Support Diagnostic tool output, refer to https://www.elastic.co/blog/why-does-elastic-support-keep-asking-for-diagnostic-files[the Elastic blog].
+
+You can generate diagnostic information using this tool before you contact https://support.elastic.co[Elastic Support] or
+https://discuss.elastic.co[Elastic Discuss] to minimize turnaround time.
+
+[discrete]
+[[diagnostic-tool-requirements]]
+=== Requirements
+
+- Java Runtime Environment or Java Development Kit v1.8 or higher
+
+[discrete]
+[[diagnostic-tool-access]]
+=== Access the tool
+
+The Support Diagnostic tool is included as a sub-library in some Elastic deployments:
+
+* {ece}: Located under **{ece}** > **Deployment** > **Operations** >
+**Prepare Bundle** > **{es}**.
+* {eck}: Run as https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-take-eck-dump.html[`eck-diagnostics`].
+
+You can also directly download the `diagnostics-X.X.X-dist.zip` file for the latest Support Diagnostic release
+from https://github.com/elastic/support-diagnostics/releases/latest[the `support-diagnostic` repo].
+
+
+[discrete]
+[[diagnostic-capture]]
+=== Capture diagnostic information
+
+To capture an {es} diagnostic:
+
+. In a terminal, verify that your network and user permissions are sufficient to connect to your {es}
+cluster by polling the cluster's <<cluster-health,health endpoint>>.
++
+For example, with the parameters `host:localhost`, `port:9200`, and `username:elastic`, you'd use the following curl request:
++
+[source,sh]
+----
+curl -X GET -k -u elastic -p https://localhost:9200/_cluster/health
+----
+// NOTCONSOLE
++
+If you receive an HTTP 200 `OK` response, then you can proceed to the next step. If you receive a different
+response code, then <<diagnostic-non-200,diagnose the issue>> before proceeding.
+
+. Using the same environment parameters, run the diagnostic tool script.
++
+For information about the parameters that you can pass to the tool, refer to the https://github.com/elastic/support-diagnostics#standard-options[diagnostic
+parameter reference].
++
+The following command options are recommended:
++
+**Unix-based systems**
++
+[source,sh]
+----
+sudo ./diagnostics.sh --type local --host localhost --port 9200 -u elastic -p --bypassDiagVerify --ssl --noVerify
+----
++
+**Windows**
++
+[source,sh]
+----
+.\diagnostics.bat --type local --host localhost --port 9200 -u elastic -p --bypassDiagVerify --ssl --noVerify
+----
++
+[TIP]
+.Script execution modes
+====
+You can execute the script in three https://github.com/elastic/support-diagnostics#diagnostic-types[modes]:
+
+* `local` (default, recommended): Polls the <<rest-apis,{es} API>>,
+gathers operating system info, and captures cluster and GC logs.
+
+* `remote`: Establishes an SSH session
+to the applicable target server to pull the same information as `local`.
+
+* `api`: Polls the <<rest-apis,{es} API>>. All other data must be
+collected manually.
+====
+
+. When the script has completed, verify that no errors were logged to `diagnostic.log`.
+If the log file contains errors, then refer to <<diagnostic-log-errors>>.
+
+. If the script completed without errors, then an archive with the format `<diagnostic type>-diagnostics-<timestamp>.zip` is created in the working directory, or in an output directory you have specified. You can review or share the diagnostic archive as needed.
+
+[discrete]
+[[diagnostic-non-200]]
+=== Diagnose a non-200 cluster health response
+
+When you poll your cluster health, if you receive any response other than `200 OK`, then the diagnostic tool
+might not work as intended. The following are possible error codes and their resolutions:
+
+HTTP 401 `UNAUTHENTICATED`::
+Additional information in the error will usually indicate either
+that your `username:password` pair is invalid, or that your `.security`
+index is unavailable and you need to set up a temporary
+<<file-realm,file-based>> user with `role:superuser` to authenticate.
+
+HTTP 403 `UNAUTHORIZED`::
+Your `username` is recognized but
+has insufficient permissions to run the diagnostic. Either use a different
+username or elevate the user's privileges.
+
+HTTP 429 `TOO_MANY_REQUESTS` (for example, `circuit_breaking_exception`)::
+Your user authenticated and was authorized, but the cluster is under
+such high strain that it's not responding to API calls. These
+responses are usually intermittent. You can proceed with running the diagnostic,
+but the diagnostic results might be incomplete.
+
+HTTP 502 `BAD_GATEWAY` or 504 `GATEWAY_TIMEOUT`::
+Your network is experiencing issues reaching the cluster. You might be using a proxy or firewall.
+Consider running the diagnostic tool from a different location, confirming your port, or using an IP
+instead of a URL domain.
+
+HTTP 503 `SERVICE_UNAVAILABLE` (for example, `master_not_discovered_exception`)::
+Your cluster does not currently have an elected master node, which is
+required for it to be API-responsive. This might be temporary while the master
+node rotates. If the issue persists, then <>
+before proceeding.
+
+[discrete]
+[[diagnostic-log-errors]]
+=== Diagnose errors in `diagnostic.log`
+
+The following are common errors that you might encounter when running the diagnostic tool:
+
+* `Error: Could not find or load main class com.elastic.support.diagnostics.DiagnosticApp`
++
+This indicates that you accidentally downloaded the source code file
+instead of `diagnostics-X.X.X-dist.zip` from the releases page.
+
+* `Could not retrieve the Elasticsearch version due to a system or network error - unable to continue.`
++
+This indicates that the diagnostic couldn't run commands against the cluster.
+Poll the cluster's health again, and ensure that you're using the same parameters
+when you run the diagnostic batch or shell file.
+
+* A `security_exception` that includes `is unauthorized for user`:
++
+The provided user has insufficient admin permissions to run the diagnostic tool. Use another
+user, or grant the user `role:superuser` privileges.
\ No newline at end of file
diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java
index 201f0810f4d9b..32c4446e71dd2 100644
--- a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java
+++ b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java
@@ -17,6 +17,7 @@
import java.io.Closeable;
import java.io.IOException;
+import java.io.StringWriter;
import java.util.Arrays;
/**
@@ -45,7 +46,7 @@ public Command(final String description) {
}
/** Parses options for this command from args and executes it. */
- public final int main(String[] args, Terminal terminal, ProcessInfo processInfo) throws Exception {
+ public final int main(String[] args, Terminal terminal, ProcessInfo processInfo) throws IOException {
try {
mainWithoutErrorHandling(args, terminal, processInfo);
} catch (OptionException e) {
@@ -59,6 +60,14 @@ public final int main(String[] args, Terminal terminal, ProcessInfo processInfo)
}
printUserException(terminal, e);
return e.exitCode;
+ } catch (IOException ioe) {
+ terminal.errorPrintln(ioe);
+ return ExitCodes.IO_ERROR;
+ } catch (Throwable t) {
+ // It's acceptable to catch Throwable at this point:
+ // We're about to exit and only want to print the stacktrace with appropriate formatting (e.g. JSON).
+ terminal.errorPrintln(t);
+ return ExitCodes.CODE_ERROR;
}
return ExitCodes.OK;
}
@@ -96,15 +105,17 @@ public OptionSet parseOptions(String[] args) {
/** Prints a help message for the command to the terminal. */
private void printHelp(Terminal terminal, boolean toStdError) throws IOException {
+ StringWriter writer = new StringWriter();
+ parser.printHelpOn(writer);
if (toStdError) {
terminal.errorPrintln(description);
terminal.errorPrintln("");
- parser.printHelpOn(terminal.getErrorWriter());
+ terminal.errorPrintln(writer.toString());
} else {
terminal.println(description);
terminal.println("");
printAdditionalHelp(terminal);
- parser.printHelpOn(terminal.getWriter());
+ terminal.println(writer.toString());
}
}
diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java
index 69cb76636a996..aaf233438f263 100644
--- a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java
+++ b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java
@@ -72,6 +72,13 @@ protected Terminal(Reader reader, PrintWriter outWriter, PrintWriter errWriter)
this.errWriter = errWriter;
}
+ /**
+ * Constructs a terminal instance from a delegate instance.
+ */
+ protected Terminal(Terminal delegate) {
+ this(delegate.reader, delegate.outWriter, delegate.errWriter);
+ }
+
/**
* Sets the verbosity of the terminal.
*
@@ -113,14 +120,12 @@ public final Reader getReader() {
return reader;
}
- /** Returns a Writer which can be used to write to the terminal directly using standard output. */
- public final PrintWriter getWriter() {
- return outWriter;
- }
-
- /** Returns a Writer which can be used to write to the terminal directly using standard error. */
- public final PrintWriter getErrorWriter() {
- return errWriter;
+ /**
+ * Returns a line-based OutputStream wrapping this Terminal's println.
+ * Note, this OutputStream is not thread-safe!
+ */
+ public final OutputStream asLineOutputStream(Charset charset) {
+ return new LineOutputStream(charset);
}
/**
@@ -138,7 +143,7 @@ public InputStream getInputStream() {
* Returns an OutputStream which can be used to write to the terminal directly using standard output.
*
* <p>May return {@code null} if this Terminal is not capable of binary output.
- * This corresponds with the underlying stream of bytes written to by {@link #getWriter()}.
+ * This corresponds with the underlying stream of bytes written to by {@link #println(CharSequence)}.
*/
@Nullable
public OutputStream getOutputStream() {
@@ -152,12 +157,12 @@ public final void println(CharSequence msg) {
/** Prints a line to the terminal at {@code verbosity} level. */
public final void println(Verbosity verbosity, CharSequence msg) {
- print(verbosity, outWriter, msg, true);
+ print(verbosity, outWriter, msg, true, true);
}
/** Prints message to the terminal's standard output at {@code verbosity} level, without a newline. */
public final void print(Verbosity verbosity, String msg) {
- print(verbosity, outWriter, msg, false);
+ print(verbosity, outWriter, msg, false, true);
}
/**
@@ -165,30 +170,49 @@ public final void print(Verbosity verbosity, String msg) {
*
* Subclasses may override if the writers are not implemented.
*/
- protected void print(Verbosity verbosity, PrintWriter writer, CharSequence msg, boolean newline) {
+ protected void print(Verbosity verbosity, PrintWriter writer, CharSequence msg, boolean newline, boolean flush) {
if (isPrintable(verbosity)) {
if (newline) {
writer.println(msg);
} else {
writer.print(msg);
}
- writer.flush();
+ if (flush) {
+ writer.flush();
+ }
}
}
/** Prints a message to the terminal's standard error at {@code verbosity} level, without a newline. */
public final void errorPrint(Verbosity verbosity, String msg) {
- print(verbosity, errWriter, msg, false);
+ print(verbosity, errWriter, msg, false, true);
}
/** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level. */
public final void errorPrintln(String msg) {
- errorPrintln(Verbosity.NORMAL, msg);
+ print(Verbosity.NORMAL, errWriter, msg, true, true);
}
/** Prints a line to the terminal's standard error at {@code verbosity} level. */
public final void errorPrintln(Verbosity verbosity, String msg) {
- print(verbosity, errWriter, msg, true);
+ print(verbosity, errWriter, msg, true, true);
+ }
+
+ /** Prints a line to the terminal's standard error at {@code verbosity} level, with an optional flush */
+ public final void errorPrintln(Verbosity verbosity, String msg, boolean flush) {
+ print(verbosity, errWriter, msg, true, flush);
+ }
+
+ /** Prints a stacktrace to the terminal's standard error at {@code verbosity} level. */
+ public void errorPrintln(Verbosity verbosity, Throwable throwable) {
+ if (isPrintable(verbosity)) {
+ throwable.printStackTrace(errWriter);
+ }
+ }
+
+ /** Prints a stacktrace to the terminal's standard error at {@link Verbosity#SILENT} verbosity level. */
+ public void errorPrintln(Throwable throwable) {
+ errorPrintln(Verbosity.SILENT, throwable);
}
/** Checks whether the given {@code verbosity} level should be printed. */
@@ -339,4 +363,54 @@ public OutputStream getOutputStream() {
return System.out;
}
}
+
+ /** A line-based OutputStream wrapping this Terminal's println, not thread-safe! */
+ private class LineOutputStream extends OutputStream {
+ static final int DEFAULT_BUFFER_LENGTH = 1024;
+ static final int MAX_BUFFER_LENGTH = DEFAULT_BUFFER_LENGTH * 8;
+
+ private final Charset charset;
+ private byte[] bytes = new byte[DEFAULT_BUFFER_LENGTH];
+ private int count = 0;
+
+ LineOutputStream(Charset charset) {
+ this.charset = charset;
+ }
+
+ @Override
+ public void write(int b) {
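+ // drop NUL bytes rather than printing them; they carry no text (assumed to be padding from the source stream)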
+ if (b == 0) return;
+ if (b == '\n') {
+ flush(true);
+ return;
+ }
+ if (count == bytes.length) {
+ if (count >= MAX_BUFFER_LENGTH) {
+ flush(false);
+ } else {
+ bytes = Arrays.copyOf(bytes, 2 * bytes.length);
+ }
+ }
+ bytes[count++] = (byte) b;
+ }
+
+ private void flush(boolean newline) {
+ if (newline && count > 0 && bytes[count - 1] == '\r') {
+ --count; // drop CR on windows as well
+ }
+ String msg = count > 0 ? new String(bytes, 0, count, charset) : "";
+ print(Verbosity.NORMAL, outWriter, msg, newline, true);
+ count = 0;
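+ // if a long line grew the buffer beyond the default, shrink it back to bound memory use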
+ if (bytes.length > DEFAULT_BUFFER_LENGTH) {
+ bytes = new byte[DEFAULT_BUFFER_LENGTH];
+ }
+ }
+
+ @Override
+ public void flush() {
+ if (count > 0) {
+ flush(false);
+ }
+ }
+ }
}
diff --git a/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java b/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java
index 9c1faf911a829..dffb93ebbf230 100644
--- a/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java
+++ b/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java
@@ -11,6 +11,17 @@
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTestCase.WithoutSecurityManager;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.nio.charset.StandardCharsets;
+
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
@WithoutSecurityManager
public class TerminalTests extends ESTestCase {
@@ -20,4 +31,33 @@ public void testSystemTerminalIfRedirected() {
// Otherwise, JDK 22 doesn't provide a console if redirected.
assertEquals(Terminal.SystemTerminal.class, Terminal.DEFAULT.getClass());
}
+
+ public void testTerminalAsLineOutputStream() throws IOException {
+ PrintWriter stdOut = mock("stdOut");
+ PrintWriter stdErr = mock("stdErr");
+
+ OutputStream out = new Terminal(mock("reader"), stdOut, stdErr) {
+ }.asLineOutputStream(StandardCharsets.UTF_8);
+
+ out.write("123".getBytes(StandardCharsets.UTF_8));
+ out.write("456".getBytes(StandardCharsets.UTF_8));
+ out.write("789\r\n".getBytes(StandardCharsets.UTF_8)); // CR is removed as well
+
+ verify(stdOut).println(eq((CharSequence) "123456789"));
+ verify(stdOut).flush();
+ verifyNoMoreInteractions(stdOut, stdErr);
+
+ out.write("\n".getBytes(StandardCharsets.UTF_8));
+ verify(stdOut).println(eq((CharSequence) ""));
+ verify(stdOut, times(2)).flush();
+ verifyNoMoreInteractions(stdOut, stdErr);
+
+ out.write("a".getBytes(StandardCharsets.UTF_8));
+ out.flush();
+ verify(stdOut).print(eq((CharSequence) "a"));
+ verify(stdOut, times(3)).flush();
+
+ out.flush();
+ verifyNoMoreInteractions(stdOut, stdErr);
+ }
}
diff --git a/libs/core/src/main/java/org/elasticsearch/core/ReleasableIterator.java b/libs/core/src/main/java/org/elasticsearch/core/ReleasableIterator.java
index 68a4a136c5308..83a68c984a684 100644
--- a/libs/core/src/main/java/org/elasticsearch/core/ReleasableIterator.java
+++ b/libs/core/src/main/java/org/elasticsearch/core/ReleasableIterator.java
@@ -46,4 +46,30 @@ public String toString() {
};
}
+
+ /**
+ * Returns an empty iterator.
+ */
+ static <T extends Releasable> ReleasableIterator<T> empty() {
+ return new ReleasableIterator<>() {
+ @Override
+ public boolean hasNext() {
+ return false;
+ }
+
+ @Override
+ public T next() {
+ assert false : "hasNext is always false so next should never be called";
+ return null;
+ }
+
+ @Override
+ public void close() {}
+
+ @Override
+ public String toString() {
+ return "ReleasableIterator[]";
+ }
+ };
+ }
}
diff --git a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java
index 5153ba688d6a9..74acb00925e5a 100644
--- a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java
+++ b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java
@@ -61,4 +61,15 @@ public static Predicate<RestApiVersion> onOrAfter(RestApiVersion restApiVersion)
};
}
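+ /** Returns the {@link RestApiVersion} constant for the given major version, e.g. {@code forMajor(8)} returns {@code V_8}. */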
+ public static RestApiVersion forMajor(int major) {
+ switch (major) {
+ case 7 -> {
+ return V_7;
+ }
+ case 8 -> {
+ return V_8;
+ }
+ default -> throw new IllegalArgumentException("Unknown REST API version " + major);
+ }
+ }
}
diff --git a/libs/native/libraries/build.gradle b/libs/native/libraries/build.gradle
index 168eb533fea74..7a545787bbdae 100644
--- a/libs/native/libraries/build.gradle
+++ b/libs/native/libraries/build.gradle
@@ -18,7 +18,7 @@ configurations {
}
var zstdVersion = "1.5.5"
-var vecVersion = "1.0.6"
+var vecVersion = "1.0.8"
repositories {
exclusiveContent {
diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java
index 56017d3a8a20a..c390cfc9289c6 100644
--- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java
+++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java
@@ -45,7 +45,15 @@ public Optional<VectorSimilarityFunctions> getVectorSimilarityFunctions() {
}
static boolean isNativeVectorLibSupported() {
- return Runtime.version().feature() >= 21 && isMacOrLinuxAarch64() && checkEnableSystemProperty();
+ return Runtime.version().feature() >= 21 && (isMacOrLinuxAarch64() || isLinuxAmd64()) && checkEnableSystemProperty();
+ }
+
+ /**
+ * Returns true iff the architecture is x64 (amd64) and the OS is Linux (the OS we currently support for the native lib).
+ */
+ static boolean isLinuxAmd64() {
+ String name = System.getProperty("os.name");
+ return (name.startsWith("Linux")) && System.getProperty("os.arch").equals("amd64");
}
/** Returns true iff the OS is Mac or Linux, and the architecture is aarch64. */
diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java
index 5313984ac6d61..0af87154960ad 100644
--- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java
+++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java
@@ -17,7 +17,10 @@
import java.lang.foreign.MemorySegment;
import java.lang.invoke.MethodHandle;
import java.nio.file.Files;
+import java.nio.file.Path;
import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.List;
import static java.lang.foreign.ValueLayout.ADDRESS;
import static java.lang.foreign.ValueLayout.JAVA_INT;
@@ -26,31 +29,49 @@
class JdkSystemdLibrary implements SystemdLibrary {
static {
- System.load(findLibSystemd());
- }
-
- // On some systems libsystemd does not have a non-versioned symlink. System.loadLibrary only knows how to find
- // non-versioned library files. So we must manually check the library path to find what we need.
- static String findLibSystemd() {
- final String libsystemd = "libsystemd.so.0";
- String libpath = System.getProperty("java.library.path");
- for (String basepathStr : libpath.split(":")) {
- var basepath = Paths.get(basepathStr);
- if (Files.exists(basepath) == false) {
- continue;
+ // Find and load libsystemd. We attempt all instances of
+ // libsystemd in case of multiarch systems, and stop when
+ // one is successfully loaded. If none can be loaded,
+ // UnsatisfiedLinkError will be thrown.
+ List<String> paths = findLibSystemd();
+ if (paths.isEmpty()) {
+ String libpath = System.getProperty("java.library.path");
+ throw new UnsatisfiedLinkError("Could not find libsystemd in java.library.path: " + libpath);
+ }
+ UnsatisfiedLinkError last = null;
+ for (String path : paths) {
+ try {
+ System.load(path);
+ last = null;
+ break;
+ } catch (UnsatisfiedLinkError e) {
+ last = e;
}
- try (var stream = Files.walk(basepath)) {
+ }
+ if (last != null) {
+ throw last;
+ }
+ }
- var foundpath = stream.filter(Files::isDirectory).map(p -> p.resolve(libsystemd)).filter(Files::exists).findAny();
- if (foundpath.isPresent()) {
- return foundpath.get().toAbsolutePath().toString();
- }
+ // findLibSystemd returns a list of paths to instances of libsystemd
+ // found within java.library.path.
+ static List<String> findLibSystemd() {
+ // Note: on some systems libsystemd does not have a non-versioned symlink.
+ // System.loadLibrary only knows how to find non-versioned library files,
+ // so we must manually check the library path to find what we need.
+ final Path libsystemd = Paths.get("libsystemd.so.0");
+ final String libpath = System.getProperty("java.library.path");
+ return Arrays.stream(libpath.split(":")).map(Paths::get).filter(Files::exists).flatMap(p -> {
+ try {
+ return Files.find(
+ p,
+ Integer.MAX_VALUE,
+ (fp, attrs) -> (attrs.isDirectory() == false && fp.getFileName().equals(libsystemd))
+ );
} catch (IOException e) {
throw new UncheckedIOException(e);
}
-
- }
- throw new UnsatisfiedLinkError("Could not find " + libsystemd + " in java.library.path: " + libpath);
+ }).map(p -> p.toAbsolutePath().toString()).toList();
}
private static final MethodHandle sd_notify$mh = downcallHandle("sd_notify", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS));
diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctionsTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctionsTests.java
index adf32874c04f1..8c4cbb688abcd 100644
--- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctionsTests.java
+++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctionsTests.java
@@ -37,7 +37,9 @@ public boolean supported() {
var arch = System.getProperty("os.arch");
var osName = System.getProperty("os.name");
- if (jdkVersion >= 21 && arch.equals("aarch64") && (osName.startsWith("Mac") || osName.equals("Linux"))) {
+ if (jdkVersion >= 21
+ && ((arch.equals("aarch64") && (osName.startsWith("Mac") || osName.equals("Linux")))
+ || (arch.equals("amd64") && osName.equals("Linux")))) {
assertThat(vectorSimilarityFunctions, isPresent());
return true;
} else {
diff --git a/libs/vec/native/Dockerfile b/libs/vec/native/Dockerfile
index 25dcf4d4854d0..66eb7e92ef479 100644
--- a/libs/vec/native/Dockerfile
+++ b/libs/vec/native/Dockerfile
@@ -4,6 +4,7 @@ RUN apt update
RUN apt install -y gcc g++ openjdk-17-jdk
COPY . /workspace
WORKDIR /workspace
-RUN ./gradlew --quiet --console=plain clean vecSharedLibrary
+RUN ./gradlew --quiet --console=plain clean buildSharedLibrary
+RUN strip --strip-unneeded build/output/libvec.so
-CMD cat build/libs/vec/shared/libvec.so
+CMD cat build/output/libvec.so
diff --git a/libs/vec/native/build.gradle b/libs/vec/native/build.gradle
index 6a658da0644b7..7edf46d406862 100644
--- a/libs/vec/native/build.gradle
+++ b/libs/vec/native/build.gradle
@@ -12,9 +12,10 @@ var os = org.gradle.internal.os.OperatingSystem.current()
// To update this library run publish_vec_binaries.sh ( or ./gradlew vecSharedLibrary )
// Or
// For local development, build the docker image with:
-// docker build --platform linux/arm64 --progress=plain .
+// docker build --platform linux/arm64 --progress=plain . (for aarch64)
+// docker build --platform linux/amd64 --progress=plain . (for x64)
// Grab the image id from the console output, then, e.g.
-// docker run 9c9f36564c148b275aeecc42749e7b4580ded79dcf51ff6ccc008c8861e7a979 > build/libs/vec/shared/libvec.so
+// docker run 9c9f36564c148b275aeecc42749e7b4580ded79dcf51ff6ccc008c8861e7a979 > build/libs/vec/shared/$arch/libvec.so
//
// To run tests and benchmarks on a locally built libvec,
// 1. Temporarily comment out the download in libs/native/library/build.gradle
@@ -30,26 +31,83 @@ var os = org.gradle.internal.os.OperatingSystem.current()
group = 'org.elasticsearch'
+def platformName = System.getProperty("os.arch");
+
model {
+ platforms {
+ aarch64 {
+ architecture "aarch64"
+ }
+ amd64 {
+ architecture "x86-64"
+ }
+ }
toolChains {
gcc(Gcc) {
target("aarch64") {
cCompiler.executable = "/usr/bin/gcc"
+ cCompiler.withArguments { args -> args.addAll(["-O3", "-std=c99", "-march=armv8-a"]) }
+ }
+ target("amd64") {
+ cCompiler.executable = "/usr/bin/gcc"
+ cCompiler.withArguments { args -> args.addAll(["-O3", "-std=c99", "-march=core-avx2", "-Wno-incompatible-pointer-types"]) }
}
}
- clang(Clang)
- }
- platforms {
- aarch64 {
- architecture "aarch64"
+ cl(VisualCpp) {
+ eachPlatform { toolchain ->
+ def platform = toolchain.getPlatform()
+ if (platform.name == "x64") {
+ cCompiler.withArguments { args -> args.addAll(["/O2", "/LD", "-march=core-avx2"]) }
+ }
+ }
+ }
+ clang(Clang) {
+ target("amd64") {
+ cCompiler.withArguments { args -> args.addAll(["-O3", "-std=c99", "-march=core-avx2"]) }
+ }
}
}
components {
vec(NativeLibrarySpec) {
targetPlatform "aarch64"
- binaries.withType(SharedLibraryBinarySpec) {
- cCompiler.args "-O3", "-std=c99", "-march=armv8-a"
+ targetPlatform "amd64"
+
+ sources {
+ c {
+ source {
+ srcDir "src/vec/c/${platformName}/"
+ include "*.c"
+ }
+ exportedHeaders {
+ srcDir "src/vec/headers/"
+ }
+ }
+ }
+ }
+ }
+}
+
+tasks.register('buildSharedLibrary') {
+ description = 'Assembles native shared library for the host architecture'
+ if (platformName.equals("aarch64")) {
+ dependsOn tasks.vecAarch64SharedLibrary
+ doLast {
+ copy {
+ from tasks.linkVecAarch64SharedLibrary.outputs.files.files
+ into layout.buildDirectory.dir('output');
+ duplicatesStrategy = 'INCLUDE'
+ }
+ }
+ } else if (platformName.equals("amd64")) {
+ dependsOn tasks.vecAmd64SharedLibrary
+ doLast {
+ copy {
+ from tasks.linkVecAmd64SharedLibrary.outputs.files.files
+ into layout.buildDirectory.dir('output');
+ duplicatesStrategy = 'INCLUDE'
}
}
+ } else {
+ throw new GradleException("Unsupported platform: " + platformName)
}
}
diff --git a/libs/vec/native/publish_vec_binaries.sh b/libs/vec/native/publish_vec_binaries.sh
index e17690160e253..2ed6c750ab9e8 100755
--- a/libs/vec/native/publish_vec_binaries.sh
+++ b/libs/vec/native/publish_vec_binaries.sh
@@ -19,7 +19,7 @@ if [ -z "$ARTIFACTORY_API_KEY" ]; then
exit 1;
fi
-VERSION="1.0.6"
+VERSION="1.0.8"
ARTIFACTORY_REPOSITORY="${ARTIFACTORY_REPOSITORY:-https://artifactory.elastic.dev/artifactory/elasticsearch-native/}"
TEMP=$(mktemp -d)
@@ -29,16 +29,22 @@ if curl -sS -I --fail --location "${ARTIFACTORY_REPOSITORY}/org/elasticsearch/ve
fi
echo 'Building Darwin binary...'
-./gradlew --quiet --console=plain vecSharedLibrary
+./gradlew --quiet --console=plain vecAarch64SharedLibrary
echo 'Building Linux binary...'
DOCKER_IMAGE=$(docker build --platform linux/arm64 --quiet .)
-docker run $DOCKER_IMAGE > build/libs/vec/shared/libvec.so
+docker run $DOCKER_IMAGE > build/libs/vec/shared/aarch64/libvec.so
+
+echo 'Building Linux x64 binary...'
+DOCKER_IMAGE=$(docker build --platform linux/amd64 --quiet .)
+docker run --platform linux/amd64 $DOCKER_IMAGE > build/libs/vec/shared/amd64/libvec.so
mkdir -p $TEMP/darwin-aarch64
mkdir -p $TEMP/linux-aarch64
-cp build/libs/vec/shared/libvec.dylib $TEMP/darwin-aarch64/
-cp build/libs/vec/shared/libvec.so $TEMP/linux-aarch64/
+mkdir -p $TEMP/linux-x64
+cp build/libs/vec/shared/aarch64/libvec.dylib $TEMP/darwin-aarch64/
+cp build/libs/vec/shared/aarch64/libvec.so $TEMP/linux-aarch64/
+cp build/libs/vec/shared/amd64/libvec.so $TEMP/linux-x64/
echo 'Uploading to Artifactory...'
(cd $TEMP && zip -rq - .) | curl -sS -X PUT -H "X-JFrog-Art-Api: ${ARTIFACTORY_API_KEY}" --data-binary @- --location "${ARTIFACTORY_REPOSITORY}/org/elasticsearch/vec/${VERSION}/vec-${VERSION}.zip"
diff --git a/libs/vec/native/src/vec/c/vec.c b/libs/vec/native/src/vec/c/aarch64/vec.c
similarity index 99%
rename from libs/vec/native/src/vec/c/vec.c
rename to libs/vec/native/src/vec/c/aarch64/vec.c
index 05dfe64a3be9b..478e5e84d3859 100644
--- a/libs/vec/native/src/vec/c/vec.c
+++ b/libs/vec/native/src/vec/c/aarch64/vec.c
@@ -121,7 +121,7 @@ static inline int32_t sqr7u_inner(int8_t *a, int8_t *b, size_t dims) {
EXPORT int32_t sqr7u(int8_t* a, int8_t* b, size_t dims) {
int32_t res = 0;
int i = 0;
- if (i > SQR7U_STRIDE_BYTES_LEN) {
+ if (dims > SQR7U_STRIDE_BYTES_LEN) {
i += dims & ~(SQR7U_STRIDE_BYTES_LEN - 1);
res = sqr7u_inner(a, b, i);
}
diff --git a/libs/vec/native/src/vec/c/amd64/vec.c b/libs/vec/native/src/vec/c/amd64/vec.c
new file mode 100644
index 0000000000000..c9a49ad2d1d4d
--- /dev/null
+++ b/libs/vec/native/src/vec/c/amd64/vec.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include "vec.h"
+
+#include <immintrin.h>
+#include <emmintrin.h>
+
+#ifndef DOT7U_STRIDE_BYTES_LEN
+#define DOT7U_STRIDE_BYTES_LEN 32 // Must be a power of 2
+#endif
+
+#ifndef SQR7U_STRIDE_BYTES_LEN
+#define SQR7U_STRIDE_BYTES_LEN 32 // Must be a power of 2
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#elif __GNUC__
+#include <x86intrin.h>
+#elif __clang__
+#include <x86intrin.h>
+#endif
+
+// Multi-platform CPUID "intrinsic"; it takes as input a "functionNumber" (or "leaf", the eax registry). "Subleaf"
+// is always 0. Output is stored in the passed output parameter: output[0] = eax, output[1] = ebx, output[2] = ecx,
+// output[3] = edx
+static inline void cpuid(int output[4], int functionNumber) {
+#if defined(__GNUC__) || defined(__clang__)
+ // use inline assembly, Gnu/AT&T syntax
+ int a, b, c, d;
+ __asm("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "a"(functionNumber), "c"(0) : );
+ output[0] = a;
+ output[1] = b;
+ output[2] = c;
+ output[3] = d;
+
+#elif defined (_MSC_VER)
+ __cpuidex(output, functionNumber, 0);
+#else
+ #error Unsupported compiler
+#endif
+}
+
+// Utility function to horizontally add 8 32-bit integers
+static inline int hsum_i32_8(const __m256i a) {
+ const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
+ const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
+ const __m128i sum64 = _mm_add_epi32(hi64, sum128);
+ const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
+ return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
+}
+
+EXPORT int vec_caps() {
+ int cpuInfo[4] = {-1};
+ // Calling __cpuid with 0x0 as the function_id argument
+ // gets the number of the highest valid function ID.
+ cpuid(cpuInfo, 0);
+ int functionIds = cpuInfo[0];
+ if (functionIds >= 7) {
+ cpuid(cpuInfo, 7);
+ int ebx = cpuInfo[1];
+ // AVX2 flag is the 5th bit
+ // We assume that all processors that have AVX2 also have FMA3
+ return (ebx & (1 << 5)) != 0;
+ }
+ return 0;
+}
+
+static inline int32_t dot7u_inner(int8_t* a, int8_t* b, size_t dims) {
+ const __m256i ones = _mm256_set1_epi16(1);
+
+ // Init accumulator(s) with 0
+ __m256i acc1 = _mm256_setzero_si256();
+
+#pragma GCC unroll 4
+ for(int i = 0; i < dims; i += DOT7U_STRIDE_BYTES_LEN) {
+ // Load packed 8-bit integers
+ __m256i va1 = _mm256_loadu_si256(a + i);
+ __m256i vb1 = _mm256_loadu_si256(b + i);
+
+ // Perform multiplication and create 16-bit values
+ // Vertically multiply each unsigned 8-bit integer from va with the corresponding
+ // 8-bit integer from vb, producing intermediate signed 16-bit integers.
+ const __m256i vab = _mm256_maddubs_epi16(va1, vb1);
+ // Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the results.
+ acc1 = _mm256_add_epi32(_mm256_madd_epi16(ones, vab), acc1);
+ }
+
+ // reduce (horizontally add all)
+ return hsum_i32_8(acc1);
+}
+
+EXPORT int32_t dot7u(int8_t* a, int8_t* b, size_t dims) {
+ int32_t res = 0;
+ int i = 0;
+ if (dims > DOT7U_STRIDE_BYTES_LEN) {
+ i += dims & ~(DOT7U_STRIDE_BYTES_LEN - 1);
+ res = dot7u_inner(a, b, i);
+ }
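+ // scalar tail: multiply-accumulate the remaining elements that do not fill a full stride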
+ for (; i < dims; i++) {
+ res += a[i] * b[i];
+ }
+ return res;
+}
+
+static inline int32_t sqr7u_inner(int8_t *a, int8_t *b, size_t dims) {
+ // Init accumulator(s) with 0
+ __m256i acc1 = _mm256_setzero_si256();
+
+ const __m256i ones = _mm256_set1_epi16(1);
+
+#pragma GCC unroll 4
+ for(int i = 0; i < dims; i += SQR7U_STRIDE_BYTES_LEN) {
+ // Load packed 8-bit integers
+ __m256i va1 = _mm256_loadu_si256(a + i);
+ __m256i vb1 = _mm256_loadu_si256(b + i);
+
+ const __m256i dist1 = _mm256_sub_epi8(va1, vb1);
+ const __m256i abs_dist1 = _mm256_sign_epi8(dist1, dist1);
+ const __m256i sqr1 = _mm256_maddubs_epi16(abs_dist1, abs_dist1);
+
+ acc1 = _mm256_add_epi32(_mm256_madd_epi16(ones, sqr1), acc1);
+ }
+
+ // reduce (accumulate all)
+ return hsum_i32_8(acc1);
+}
+
+EXPORT int32_t sqr7u(int8_t* a, int8_t* b, size_t dims) {
+ int32_t res = 0;
+ int i = 0;
+ if (dims > SQR7U_STRIDE_BYTES_LEN) {
+ i += dims & ~(SQR7U_STRIDE_BYTES_LEN - 1);
+ res = sqr7u_inner(a, b, i);
+ }
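+ // scalar tail: accumulate squared differences for the remaining elements beyond the last full stride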
+ for (; i < dims; i++) {
+ int32_t dist = a[i] - b[i];
+ res += dist * dist;
+ }
+ return res;
+}
+
diff --git a/libs/vec/native/src/vec/headers/vec.h b/libs/vec/native/src/vec/headers/vec.h
index 5d3806dfccbe6..49fa29ec6fae9 100644
--- a/libs/vec/native/src/vec/headers/vec.h
+++ b/libs/vec/native/src/vec/headers/vec.h
@@ -7,7 +7,7 @@
*/
#ifdef _MSC_VER
-#define EXPORT extern "C" __declspec(dllexport)
+#define EXPORT __declspec(dllexport)
#elif defined(__GNUC__) && !defined(__clang__)
#define EXPORT __attribute__((externally_visible,visibility("default")))
#elif __clang__
diff --git a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/AbstractInt7ScalarQuantizedVectorScorer.java b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/AbstractInt7ScalarQuantizedVectorScorer.java
index 2e60079da8649..2be0aa53f7c57 100644
--- a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/AbstractInt7ScalarQuantizedVectorScorer.java
+++ b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/AbstractInt7ScalarQuantizedVectorScorer.java
@@ -78,7 +78,7 @@ protected final void checkOrdinal(int ord) {
}
}
- protected final float fallbackScore(int firstByteOffset, int secondByteOffset) throws IOException {
+ protected final float fallbackScore(long firstByteOffset, long secondByteOffset) throws IOException {
input.seek(firstByteOffset);
byte[] a = new byte[dims];
input.readBytes(a, 0, a.length);
diff --git a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java
index f92bf0b52ed07..9b452219bd635 100644
--- a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java
+++ b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java
@@ -34,8 +34,8 @@ public float score(int firstOrd, int secondOrd) throws IOException {
checkOrdinal(secondOrd);
final int length = dims;
- int firstByteOffset = firstOrd * (length + Float.BYTES);
- int secondByteOffset = secondOrd * (length + Float.BYTES);
+ long firstByteOffset = (long) firstOrd * (length + Float.BYTES);
+ long secondByteOffset = (long) secondOrd * (length + Float.BYTES);
MemorySegment firstSeg = segmentSlice(firstByteOffset, length);
input.seek(firstByteOffset + length);
diff --git a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7Euclidean.java b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7Euclidean.java
index e1f16c6909cf4..55b08a899bd7c 100644
--- a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7Euclidean.java
+++ b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7Euclidean.java
@@ -34,8 +34,8 @@ public float score(int firstOrd, int secondOrd) throws IOException {
checkOrdinal(secondOrd);
final int length = dims;
- int firstByteOffset = firstOrd * (length + Float.BYTES);
- int secondByteOffset = secondOrd * (length + Float.BYTES);
+ long firstByteOffset = (long) firstOrd * (length + Float.BYTES);
+ long secondByteOffset = (long) secondOrd * (length + Float.BYTES);
MemorySegment firstSeg = segmentSlice(firstByteOffset, length);
MemorySegment secondSeg = segmentSlice(secondByteOffset, length);
diff --git a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7MaximumInnerProduct.java b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7MaximumInnerProduct.java
index bd6fc921f1832..5cdfc62bc9071 100644
--- a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7MaximumInnerProduct.java
+++ b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7MaximumInnerProduct.java
@@ -34,8 +34,8 @@ public float score(int firstOrd, int secondOrd) throws IOException {
checkOrdinal(secondOrd);
final int length = dims;
- int firstByteOffset = firstOrd * (length + Float.BYTES);
- int secondByteOffset = secondOrd * (length + Float.BYTES);
+ long firstByteOffset = (long) firstOrd * (length + Float.BYTES);
+ long secondByteOffset = (long) secondOrd * (length + Float.BYTES);
MemorySegment firstSeg = segmentSlice(firstByteOffset, length);
input.seek(firstByteOffset + length);
diff --git a/libs/vec/src/test/java/org/elasticsearch/vec/AbstractVectorTestCase.java b/libs/vec/src/test/java/org/elasticsearch/vec/AbstractVectorTestCase.java
index 771f665fb4084..13f2d5a03ec76 100644
--- a/libs/vec/src/test/java/org/elasticsearch/vec/AbstractVectorTestCase.java
+++ b/libs/vec/src/test/java/org/elasticsearch/vec/AbstractVectorTestCase.java
@@ -39,7 +39,9 @@ public static boolean supported() {
var arch = System.getProperty("os.arch");
var osName = System.getProperty("os.name");
- if (jdkVersion >= 21 && arch.equals("aarch64") && (osName.startsWith("Mac") || osName.equals("Linux"))) {
+ if (jdkVersion >= 21
+ && (arch.equals("aarch64") && (osName.startsWith("Mac") || osName.equals("Linux"))
+ || arch.equals("amd64") && osName.equals("Linux"))) {
assertThat(factory, isPresent());
return true;
} else {
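
Since && binds tighter than ||, the new condition groups as (aarch64 on Mac or Linux) or (amd64 on Linux), with the JDK 21 check applying to both. A hedged sketch with the predicate extracted into a hypothetical helper (not in the patch), just to make the grouping explicit:

// Hypothetical helper, not in the patch: the same platform check with precedence spelled out.
static boolean nativeScorerSupported(int jdkVersion, String arch, String osName) {
    boolean onAarch64 = arch.equals("aarch64") && (osName.startsWith("Mac") || osName.equals("Linux"));
    boolean onAmd64 = arch.equals("amd64") && osName.equals("Linux");
    return jdkVersion >= 21 && (onAarch64 || onAmd64);
}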
diff --git a/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java b/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java
index 115cf8e8cf9f8..246ddaeb2ebcf 100644
--- a/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java
+++ b/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java
@@ -8,6 +8,8 @@
package org.elasticsearch.vec;
+import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
+
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
@@ -17,6 +19,8 @@
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
+import java.util.Objects;
+import java.util.Random;
import java.util.function.Function;
import static org.elasticsearch.vec.VectorSimilarityType.COSINE;
@@ -226,6 +230,67 @@ void testRandomSliceImpl(int dims, long maxChunkSize, int initialPadding, Functi
}
}
+ // Tests with a large amount of data (> 2GB), which ensures that data offsets do not overflow
+ @Nightly
+ public void testLarge() throws IOException {
+ var factory = AbstractVectorTestCase.factory.get();
+
+ try (Directory dir = new MMapDirectory(createTempDir(getTestName()))) {
+ final int dims = 8192;
+ final int size = 262144;
+ final float correction = randomFloat();
+
+ String fileName = getTestName() + "-" + dims;
+ logger.info("Testing " + fileName);
+ try (IndexOutput out = dir.createOutput(fileName, IOContext.DEFAULT)) {
+ for (int i = 0; i < size; i++) {
+ var vec = vector(i, dims);
+ var off = (float) i;
+ out.writeBytes(vec, 0, vec.length);
+ out.writeInt(Float.floatToIntBits(off));
+ }
+ }
+ try (IndexInput in = dir.openInput(fileName, IOContext.DEFAULT)) {
+ for (int times = 0; times < TIMES; times++) {
+ int idx0 = randomIntBetween(0, size - 1);
+ int idx1 = size - 1;
+ float off0 = (float) idx0;
+ float off1 = (float) idx1;
+ // dot product
+ float expected = luceneScore(DOT_PRODUCT, vector(idx0, dims), vector(idx1, dims), correction, off0, off1);
+ var scorer = factory.getInt7ScalarQuantizedVectorScorer(dims, size, correction, DOT_PRODUCT, in).get();
+ assertThat(scorer.score(idx0, idx1), equalTo(expected));
+ assertThat((new VectorScorerSupplierAdapter(scorer)).scorer(idx0).score(idx1), equalTo(expected));
+ // max inner product
+ expected = luceneScore(MAXIMUM_INNER_PRODUCT, vector(idx0, dims), vector(idx1, dims), correction, off0, off1);
+ scorer = factory.getInt7ScalarQuantizedVectorScorer(dims, size, correction, MAXIMUM_INNER_PRODUCT, in).get();
+ assertThat(scorer.score(idx0, idx1), equalTo(expected));
+ assertThat((new VectorScorerSupplierAdapter(scorer)).scorer(idx0).score(idx1), equalTo(expected));
+ // cosine
+ expected = luceneScore(COSINE, vector(idx0, dims), vector(idx1, dims), correction, off0, off1);
+ scorer = factory.getInt7ScalarQuantizedVectorScorer(dims, size, correction, COSINE, in).get();
+ assertThat(scorer.score(idx0, idx1), equalTo(expected));
+ assertThat((new VectorScorerSupplierAdapter(scorer)).scorer(idx0).score(idx1), equalTo(expected));
+ // euclidean
+ expected = luceneScore(EUCLIDEAN, vector(idx0, dims), vector(idx1, dims), correction, off0, off1);
+ scorer = factory.getInt7ScalarQuantizedVectorScorer(dims, size, correction, EUCLIDEAN, in).get();
+ assertThat(scorer.score(idx0, idx1), equalTo(expected));
+ assertThat((new VectorScorerSupplierAdapter(scorer)).scorer(idx0).score(idx1), equalTo(expected));
+ }
+ }
+ }
+ }
+
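
The sizing of the test above is deliberate: each entry is dims bytes of quantized vector plus a 4-byte float correction, so the file holds 262,144 * (8,192 + 4) = 2,148,532,224 bytes, just past Integer.MAX_VALUE (2,147,483,647). Offsets into the tail of this file therefore cannot be computed in int arithmetic, which is exactly what the widened offsets in the scorers above guard against.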
+ // creates a vector derived from the given ordinal; the result is reproducible for the same ord and dims
+ static byte[] vector(int ord, int dims) {
+ var random = new Random(Objects.hash(ord, dims));
+ byte[] ba = new byte[dims];
+ for (int i = 0; i < dims; i++) {
+ ba[i] = (byte) RandomNumbers.randomIntBetween(random, MIN_INT7_VALUE, MAX_INT7_VALUE);
+ }
+ return ba;
+ }
+
static Function<Integer, byte[]> BYTE_ARRAY_RANDOM_INT7_FUNC = size -> {
byte[] ba = new byte[size];
randomBytesBetween(ba, MIN_INT7_VALUE, MAX_INT7_VALUE);
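
Because vector(ord, dims) above seeds its Random from Objects.hash(ord, dims), the same arguments always reproduce the same bytes, letting the test regenerate any vector on demand for score verification instead of buffering the full ~2 GiB. A standalone illustration of the property (it mirrors the helper rather than calling it; the real helper restricts values to the int7 range):

import java.util.Arrays;
import java.util.Objects;
import java.util.Random;

// Standalone sketch, not the test helper itself: identical (ord, dims) pairs
// seed identical Randoms, so the generated bytes always match.
public class ReproducibleVectors {
    static byte[] vector(int ord, int dims) {
        var random = new Random(Objects.hash(ord, dims));
        byte[] ba = new byte[dims];
        for (int i = 0; i < dims; i++) {
            ba[i] = (byte) (random.nextInt(256) - 128); // full byte range here; the test clamps to int7
        }
        return ba;
    }

    public static void main(String[] args) {
        System.out.println(Arrays.equals(vector(42, 8192), vector(42, 8192))); // true
    }
}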
diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/EcsLogsDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/EcsLogsDataStreamIT.java
index 3802d572e04dd..5fe72c38078ee 100644
--- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/EcsLogsDataStreamIT.java
+++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/EcsLogsDataStreamIT.java
@@ -201,12 +201,12 @@ public void testGeneralMockupEcsMappings() throws Exception {
"host": {
"cpu": {
"usage": 0.68
- }
- },
- "geo": {
- "location": {
- "lon": -73.614830,
- "lat": 45.505918
+ },
+ "geo": {
+ "location": {
+ "lon": -73.614830,
+ "lat": 45.505918
+ }
}
},
"data_stream": {
@@ -414,7 +414,10 @@ public void testGeneralMockupEcsMappings() throws Exception {
getValueFromPath(properties, List.of("host", "properties", "cpu", "properties", "usage", "scaling_factor")),
is(1000.0)
);
- assertThat(getValueFromPath(properties, List.of("geo", "properties", "location", "type")), is("geo_point"));
+ assertThat(
+ getValueFromPath(properties, List.of("host", "properties", "geo", "properties", "location", "type")),
+ is("geo_point")
+ );
assertThat(getValueFromPath(properties, List.of("data_stream", "properties", "dataset", "type")), is("constant_keyword"));
assertThat(getValueFromPath(properties, List.of("data_stream", "properties", "namespace", "type")), is("constant_keyword"));
assertThat(getValueFromPath(properties, List.of("data_stream", "properties", "type", "type")), is("constant_keyword"));
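
In ECS, geo information describing the host lives under host.geo.*, so the sample document and the mapping-path assertion above both move location from a top-level geo object to host.geo; the geo_point type itself is unchanged.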
diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java
index 2370cca08b23e..79d33a95c4709 100644
--- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java
+++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java
@@ -463,7 +463,6 @@ public void testNoSubobjects() throws Exception {
{
"@timestamp": "2023-06-12",
"start_timestamp": "2023-06-08",
- "location" : "POINT (-71.34 41.12)",
"test": "flattened",
"test.start_timestamp": "not a date",
"test.start-timestamp": "not a date",
@@ -497,7 +496,7 @@ public void testNoSubobjects() throws Exception {
"vulnerability.score.version": "2.0",
"vulnerability.textual_score": "bad",
"host.cpu.usage": 0.68,
- "geo.location": [-73.614830, 45.505918],
+ "host.geo.location": [-73.614830, 45.505918],
"data_stream.dataset": "nginx.access",
"data_stream.namespace": "production",
"data_stream.custom": "whatever",
@@ -521,8 +520,7 @@ public void testNoSubobjects() throws Exception {
},
"fields": [
"data_stream.type",
- "location",
- "geo.location",
+ "host.geo.location",
"test.start-timestamp",
"test.start_timestamp",
"vulnerability.textual_score"
@@ -537,14 +535,9 @@ public void testNoSubobjects() throws Exception {
// verify that data_stream.type has the correct constant_keyword value
assertThat(fields.get("data_stream.type"), is(List.of("logs")));
// verify geo_point subfields evaluation
- assertThat(((List