diff --git a/.buildkite/pipelines/pull-request/bwc-snapshots-windows.yml b/.buildkite/pipelines/pull-request/bwc-snapshots-windows.yml deleted file mode 100644 index d37bdf380f926..0000000000000 --- a/.buildkite/pipelines/pull-request/bwc-snapshots-windows.yml +++ /dev/null @@ -1,20 +0,0 @@ -config: - allow-labels: test-windows -steps: - - group: bwc-snapshots-windows - steps: - - label: "{{matrix.BWC_VERSION}} / bwc-snapshots-windows" - key: "bwc-snapshots-windows" - command: .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh - env: - GRADLE_TASK: "v{{matrix.BWC_VERSION}}#bwcTest" - timeout_in_minutes: 300 - matrix: - setup: - BWC_VERSION: $SNAPSHOT_BWC_VERSIONS - agents: - provider: gcp - image: family/elasticsearch-windows-2022 - machineType: custom-32-98304 - diskType: pd-ssd - diskSizeGb: 350 diff --git a/docs/changelog/107047.yaml b/docs/changelog/107047.yaml new file mode 100644 index 0000000000000..89caed6f55074 --- /dev/null +++ b/docs/changelog/107047.yaml @@ -0,0 +1,6 @@ +pr: 107047 +summary: "Search/Mapping: KnnVectorQueryBuilder support for allowUnmappedFields" +area: Search +type: bug +issues: + - 106846 diff --git a/docs/changelog/110527.yaml b/docs/changelog/110527.yaml new file mode 100644 index 0000000000000..3ab19ecaaaa76 --- /dev/null +++ b/docs/changelog/110527.yaml @@ -0,0 +1,5 @@ +pr: 110527 +summary: "ESQL: Add boolean support to Max and Min aggs" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/110651.yaml b/docs/changelog/110651.yaml new file mode 100644 index 0000000000000..c25c63ee0284a --- /dev/null +++ b/docs/changelog/110651.yaml @@ -0,0 +1,5 @@ +pr: 110651 +summary: "Remove `default_field: message` from metrics index templates" +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/110666.yaml b/docs/changelog/110666.yaml new file mode 100644 index 0000000000000..d96f8e2024c81 --- /dev/null +++ b/docs/changelog/110666.yaml @@ -0,0 +1,5 @@ +pr: 110666 +summary: Removing the use of Stream::peek from `GeoIpDownloader::cleanDatabases` +area: Ingest Node +type: bug +issues: [] diff --git a/docs/reference/esql/functions/description/max.asciidoc b/docs/reference/esql/functions/description/max.asciidoc index ffc15dcd4c8bd..27a76ed69c3c0 100644 --- a/docs/reference/esql/functions/description/max.asciidoc +++ b/docs/reference/esql/functions/description/max.asciidoc @@ -2,4 +2,4 @@ *Description* -The maximum value of a numeric field. +The maximum value of a field. diff --git a/docs/reference/esql/functions/description/min.asciidoc b/docs/reference/esql/functions/description/min.asciidoc index 4f640854dbd37..406125b5761d1 100644 --- a/docs/reference/esql/functions/description/min.asciidoc +++ b/docs/reference/esql/functions/description/min.asciidoc @@ -2,4 +2,4 @@ *Description* -The minimum value of a numeric field. +The minimum value of a field. diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json index aaa765ea79ce4..bc7380bd76dd4 100644 --- a/docs/reference/esql/functions/kibana/definition/max.json +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -2,12 +2,24 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", "type" : "agg", "name" : "max", - "description" : "The maximum value of a numeric field.", + "description" : "The maximum value of a field.", "signatures" : [ { "params" : [ { - "name" : "number", + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", "type" : "datetime", "optional" : false, "description" : "" @@ -19,7 +31,7 @@ { "params" : [ { - "name" : "number", + "name" : "field", "type" : "double", "optional" : false, "description" : "" @@ -31,7 +43,7 @@ { "params" : [ { - "name" : "number", + "name" : "field", "type" : "integer", "optional" : false, "description" : "" @@ -43,7 +55,7 @@ { "params" : [ { - "name" : "number", + "name" : "field", "type" : "long", "optional" : false, "description" : "" diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json index ff48c87ecb8ea..937391bf242ac 100644 --- a/docs/reference/esql/functions/kibana/definition/min.json +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -2,12 +2,24 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "agg", "name" : "min", - "description" : "The minimum value of a numeric field.", + "description" : "The minimum value of a field.", "signatures" : [ { "params" : [ { - "name" : "number", + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", "type" : "datetime", "optional" : false, "description" : "" @@ -19,7 +31,7 @@ { "params" : [ { - "name" : "number", + "name" : "field", "type" : "double", "optional" : false, "description" : "" @@ -31,7 +43,7 @@ { "params" : [ { - "name" : "number", + "name" : "field", "type" : "integer", "optional" : false, "description" : "" @@ -43,7 +55,7 @@ { "params" : [ { - "name" : "number", + "name" : "field", "type" : "long", "optional" : false, "description" : "" diff --git a/docs/reference/esql/functions/kibana/docs/max.md b/docs/reference/esql/functions/kibana/docs/max.md index 9bda0fbbe972d..80e88885e7f34 100644 --- a/docs/reference/esql/functions/kibana/docs/max.md +++ b/docs/reference/esql/functions/kibana/docs/max.md @@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### MAX -The maximum value of a numeric field. +The maximum value of a field. ``` FROM employees diff --git a/docs/reference/esql/functions/kibana/docs/min.md b/docs/reference/esql/functions/kibana/docs/min.md index 100abf0260d0d..38d13b97fd344 100644 --- a/docs/reference/esql/functions/kibana/docs/min.md +++ b/docs/reference/esql/functions/kibana/docs/min.md @@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### MIN -The minimum value of a numeric field. +The minimum value of a field. 
``` FROM employees diff --git a/docs/reference/esql/functions/parameters/max.asciidoc b/docs/reference/esql/functions/parameters/max.asciidoc index 91c56709d182a..8903aa1a472a3 100644 --- a/docs/reference/esql/functions/parameters/max.asciidoc +++ b/docs/reference/esql/functions/parameters/max.asciidoc @@ -2,5 +2,5 @@ *Parameters* -`number`:: +`field`:: diff --git a/docs/reference/esql/functions/parameters/min.asciidoc b/docs/reference/esql/functions/parameters/min.asciidoc index 91c56709d182a..8903aa1a472a3 100644 --- a/docs/reference/esql/functions/parameters/min.asciidoc +++ b/docs/reference/esql/functions/parameters/min.asciidoc @@ -2,5 +2,5 @@ *Parameters* -`number`:: +`field`:: diff --git a/docs/reference/esql/functions/signature/max.svg b/docs/reference/esql/functions/signature/max.svg index cfc7bfda2c0a0..dda43dfbfbba2 100644 --- a/docs/reference/esql/functions/signature/max.svg +++ b/docs/reference/esql/functions/signature/max.svg @@ -1 +1 @@ -MAX(number) \ No newline at end of file +MAX(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/min.svg b/docs/reference/esql/functions/signature/min.svg index 31660b1490e7e..e654d3027fee8 100644 --- a/docs/reference/esql/functions/signature/min.svg +++ b/docs/reference/esql/functions/signature/min.svg @@ -1 +1 @@ -MIN(number) \ No newline at end of file +MIN(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc index cec61a56db87a..6515c6bfc48d2 100644 --- a/docs/reference/esql/functions/types/max.asciidoc +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -4,7 +4,8 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -number | result +field | result +boolean | boolean datetime | datetime double | double integer | integer diff --git a/docs/reference/esql/functions/types/min.asciidoc b/docs/reference/esql/functions/types/min.asciidoc index cec61a56db87a..6515c6bfc48d2 100644 --- a/docs/reference/esql/functions/types/min.asciidoc +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -4,7 +4,8 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -number | result +field | result +boolean | boolean datetime | datetime double | double integer | integer diff --git a/docs/reference/search/multi-search-template-api.asciidoc b/docs/reference/search/multi-search-template-api.asciidoc index c8eea52a6fd9b..b1c9518b1f2bc 100644 --- a/docs/reference/search/multi-search-template-api.asciidoc +++ b/docs/reference/search/multi-search-template-api.asciidoc @@ -22,9 +22,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/render-search-template-api.asciidoc b/docs/reference/search/render-search-template-api.asciidoc index 1f259dddf6879..0c782f26068e6 100644 --- a/docs/reference/search/render-search-template-api.asciidoc +++ b/docs/reference/search/render-search-template-api.asciidoc @@ -22,9 +22,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/search-template-api.asciidoc b/docs/reference/search/search-template-api.asciidoc index 038396e558607..c60b5281c05e5 100644 --- a/docs/reference/search/search-template-api.asciidoc +++ b/docs/reference/search/search-template-api.asciidoc @@ -21,9 +21,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": 
"{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/search-your-data/search-template.asciidoc b/docs/reference/search/search-your-data/search-template.asciidoc index 7a7f09f4a37a7..489a03c0a6a2a 100644 --- a/docs/reference/search/search-your-data/search-template.asciidoc +++ b/docs/reference/search/search-your-data/search-template.asciidoc @@ -42,9 +42,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaKernel32Library.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaKernel32Library.java index 0bfdf959f7b58..2c7ec70f36eb3 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaKernel32Library.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaKernel32Library.java @@ -13,6 +13,7 @@ import com.sun.jna.NativeLong; import com.sun.jna.Pointer; import com.sun.jna.Structure; +import com.sun.jna.Structure.ByReference; import com.sun.jna.WString; import com.sun.jna.win32.StdCallLibrary; @@ -98,6 +99,38 @@ public long Type() { } } + /** + * Basic limit information for a job object + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx + */ + public static class JnaJobObjectBasicLimitInformation extends Structure implements ByReference, JobObjectBasicLimitInformation { + public byte[] _ignore1 = new byte[16]; + public int LimitFlags; + public byte[] _ignore2 = new byte[20]; + public int ActiveProcessLimit; + public byte[] _ignore3 = new byte[20]; + + public JnaJobObjectBasicLimitInformation() { + super(8); + } + + @Override + protected List getFieldOrder() { + return List.of("_ignore1", "LimitFlags", "_ignore2", "ActiveProcessLimit", "_ignore3"); + } + + @Override + public void setLimitFlags(int v) { + LimitFlags = v; + } + + @Override + public void setActiveProcessLimit(int v) { + ActiveProcessLimit = v; + } + } + /** * JNA adaptation of {@link ConsoleCtrlHandler} */ @@ -128,6 +161,20 @@ private interface NativeFunctions extends StdCallLibrary { int GetShortPathNameW(WString lpszLongPath, char[] lpszShortPath, int cchBuffer); boolean SetConsoleCtrlHandler(StdCallLibrary.StdCallCallback handler, boolean add); + + Pointer CreateJobObjectW(Pointer jobAttributes, String name); + + boolean AssignProcessToJobObject(Pointer job, Pointer process); + + boolean QueryInformationJobObject( + Pointer job, + int infoClass, + JnaJobObjectBasicLimitInformation info, + int infoLength, + Pointer returnLength + ); + + boolean SetInformationJobObject(Pointer job, int infoClass, JnaJobObjectBasicLimitInformation info, int infoLength); } private final NativeFunctions functions; @@ -197,4 +244,42 @@ public boolean SetConsoleCtrlHandler(ConsoleCtrlHandler handler, boolean add) { consoleCtrlHandlerCallback = new NativeHandlerCallback(handler); return functions.SetConsoleCtrlHandler(consoleCtrlHandlerCallback, true); } + + @Override + public Handle CreateJobObjectW() { + return new JnaHandle(functions.CreateJobObjectW(null, null)); + } + + @Override + public boolean AssignProcessToJobObject(Handle job, Handle process) { + assert job instanceof JnaHandle; + assert process instanceof JnaHandle; + var jnaJob = (JnaHandle) job; + var jnaProcess = (JnaHandle) process; + return functions.AssignProcessToJobObject(jnaJob.pointer, jnaProcess.pointer); + } + + @Override + public 
JobObjectBasicLimitInformation newJobObjectBasicLimitInformation() { + return new JnaJobObjectBasicLimitInformation(); + } + + @Override + public boolean QueryInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info) { + assert job instanceof JnaHandle; + assert info instanceof JnaJobObjectBasicLimitInformation; + var jnaJob = (JnaHandle) job; + var jnaInfo = (JnaJobObjectBasicLimitInformation) info; + var ret = functions.QueryInformationJobObject(jnaJob.pointer, infoClass, jnaInfo, jnaInfo.size(), null); + return ret; + } + + @Override + public boolean SetInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info) { + assert job instanceof JnaHandle; + assert info instanceof JnaJobObjectBasicLimitInformation; + var jnaJob = (JnaHandle) job; + var jnaInfo = (JnaJobObjectBasicLimitInformation) info; + return functions.SetInformationJobObject(jnaJob.pointer, infoClass, jnaInfo, jnaInfo.size()); + } } diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaLinuxCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaLinuxCLibrary.java new file mode 100644 index 0000000000000..742c666d59c23 --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaLinuxCLibrary.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jna; + +import com.sun.jna.Library; +import com.sun.jna.Memory; +import com.sun.jna.Native; +import com.sun.jna.NativeLong; +import com.sun.jna.Pointer; +import com.sun.jna.Structure; + +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +class JnaLinuxCLibrary implements LinuxCLibrary { + + @Structure.FieldOrder({ "len", "filter" }) + public static final class JnaSockFProg extends Structure implements Structure.ByReference, SockFProg { + public short len; // number of filters + public Pointer filter; // filters + + JnaSockFProg(SockFilter filters[]) { + len = (short) filters.length; + // serialize struct sock_filter * explicitly, its less confusing than the JNA magic we would need + Memory filter = new Memory(len * 8); + ByteBuffer bbuf = filter.getByteBuffer(0, len * 8); + bbuf.order(ByteOrder.nativeOrder()); // little endian + for (SockFilter f : filters) { + bbuf.putShort(f.code()); + bbuf.put(f.jt()); + bbuf.put(f.jf()); + bbuf.putInt(f.k()); + } + this.filter = filter; + } + + @Override + public long address() { + return Pointer.nativeValue(getPointer()); + } + } + + private interface NativeFunctions extends Library { + + /** + * maps to prctl(2) + */ + int prctl(int option, NativeLong arg2, NativeLong arg3, NativeLong arg4, NativeLong arg5); + + /** + * used to call seccomp(2), its too new... + * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing + */ + NativeLong syscall(NativeLong number, Object... 
args); + } + + private final NativeFunctions functions; + + JnaLinuxCLibrary() { + try { + this.functions = Native.load("c", NativeFunctions.class); + } catch (UnsatisfiedLinkError e) { + throw new UnsupportedOperationException( + "seccomp unavailable: could not link methods. requires kernel 3.5+ " + + "with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" + ); + } + } + + @Override + public SockFProg newSockFProg(SockFilter[] filters) { + var prog = new JnaSockFProg(filters); + prog.write(); + return prog; + } + + @Override + public int prctl(int option, long arg2, long arg3, long arg4, long arg5) { + return functions.prctl(option, new NativeLong(arg2), new NativeLong(arg3), new NativeLong(arg4), new NativeLong(arg5)); + } + + @Override + public long syscall(long number, int operation, int flags, long address) { + return functions.syscall(new NativeLong(number), operation, flags, address).longValue(); + } +} diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaMacCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaMacCLibrary.java new file mode 100644 index 0000000000000..f416cf862b417 --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaMacCLibrary.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jna; + +import com.sun.jna.Library; +import com.sun.jna.Native; +import com.sun.jna.Pointer; +import com.sun.jna.ptr.PointerByReference; + +import org.elasticsearch.nativeaccess.lib.MacCLibrary; + +class JnaMacCLibrary implements MacCLibrary { + static class JnaErrorReference implements ErrorReference { + final PointerByReference ref = new PointerByReference(); + + @Override + public String toString() { + return ref.getValue().getString(0); + } + } + + private interface NativeFunctions extends Library { + int sandbox_init(String profile, long flags, PointerByReference errorbuf); + + void sandbox_free_error(Pointer errorbuf); + } + + private final NativeFunctions functions; + + JnaMacCLibrary() { + this.functions = Native.load("c", NativeFunctions.class); + } + + @Override + public ErrorReference newErrorReference() { + return new JnaErrorReference(); + } + + @Override + public int sandbox_init(String profile, long flags, ErrorReference errorbuf) { + assert errorbuf instanceof JnaErrorReference; + var jnaErrorbuf = (JnaErrorReference) errorbuf; + return functions.sandbox_init(profile, flags, jnaErrorbuf.ref); + } + + @Override + public void sandbox_free_error(ErrorReference errorbuf) { + assert errorbuf instanceof JnaErrorReference; + var jnaErrorbuf = (JnaErrorReference) errorbuf; + functions.sandbox_free_error(jnaErrorbuf.ref.getValue()); + } + +} diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 9d34b1ba617e8..454581ae70b51 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -10,6 +10,8 @@ import 
org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.Kernel32Library; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; @@ -30,6 +32,10 @@ public JnaNativeLibraryProvider() { JnaJavaLibrary::new, PosixCLibrary.class, JnaPosixCLibrary::new, + LinuxCLibrary.class, + JnaLinuxCLibrary::new, + MacCLibrary.class, + JnaMacCLibrary::new, Kernel32Library.class, JnaKernel32Library::new, SystemdLibrary.class, diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java index 7e8e4f23ab034..03a7b9c0869be 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java @@ -39,6 +39,50 @@ public long rlim_cur() { public long rlim_max() { return rlim_max.longValue(); } + + @Override + public void rlim_cur(long v) { + rlim_cur.setValue(v); + } + + @Override + public void rlim_max(long v) { + rlim_max.setValue(v); + } + } + + public static class JnaFStore extends Structure implements Structure.ByReference, FStore { + + public int fst_flags = 0; + public int fst_posmode = 0; + public NativeLong fst_offset = new NativeLong(0); + public NativeLong fst_length = new NativeLong(0); + public NativeLong fst_bytesalloc = new NativeLong(0); + + @Override + public void set_flags(int flags) { + this.fst_flags = flags; + } + + @Override + public void set_posmode(int posmode) { + this.fst_posmode = posmode; + } + + @Override + public void set_offset(long offset) { + fst_offset.setValue(offset); + } + + @Override + public void set_length(long length) { + fst_length.setValue(length); + } + + @Override + public long bytesalloc() { + return fst_bytesalloc.longValue(); + } } private interface NativeFunctions extends Library { @@ -46,8 +90,12 @@ private interface NativeFunctions extends Library { int getrlimit(int resource, JnaRLimit rlimit); + int setrlimit(int resource, JnaRLimit rlimit); + int mlockall(int flags); + int fcntl(int fd, int cmd, JnaFStore fst); + String strerror(int errno); } @@ -74,11 +122,30 @@ public int getrlimit(int resource, RLimit rlimit) { return functions.getrlimit(resource, jnaRlimit); } + @Override + public int setrlimit(int resource, RLimit rlimit) { + assert rlimit instanceof JnaRLimit; + var jnaRlimit = (JnaRLimit) rlimit; + return functions.setrlimit(resource, jnaRlimit); + } + @Override public int mlockall(int flags) { return functions.mlockall(flags); } + @Override + public FStore newFStore() { + return new JnaFStore(); + } + + @Override + public int fcntl(int fd, int cmd, FStore fst) { + assert fst instanceof JnaFStore; + var jnaFst = (JnaFStore) fst; + return functions.fcntl(fd, cmd, jnaFst); + } + @Override public String strerror(int errno) { return functions.strerror(errno); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java index 80a18a2bc8aa0..c10f57a900ff7 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java +++ 
b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java @@ -22,6 +22,7 @@ abstract class AbstractNativeAccess implements NativeAccess { private final JavaLibrary javaLib; private final Zstd zstd; protected boolean isMemoryLocked = false; + protected ExecSandboxState execSandboxState = ExecSandboxState.NONE; protected AbstractNativeAccess(String name, NativeLibraryProvider libraryProvider) { this.name = name; @@ -53,4 +54,9 @@ public CloseableByteBuffer newBuffer(int len) { public boolean isMemoryLocked() { return isMemoryLocked; } + + @Override + public ExecSandboxState getExecSandboxState() { + return execSandboxState; + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java index 7948dad1df4ad..c50e639c94d27 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java @@ -8,15 +8,88 @@ package org.elasticsearch.nativeaccess; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFProg; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFilter; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import java.util.Map; + class LinuxNativeAccess extends PosixNativeAccess { - Systemd systemd; + /** the preferred method is seccomp(2), since we can apply to all threads of the process */ + static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17 + static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17 + + /** otherwise, we can use prctl(2), which will at least protect ES application threads */ + static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5 + static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5 + static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23 + static final int PR_SET_SECCOMP = 22; // since Linux 2.6.23 + static final long SECCOMP_MODE_FILTER = 2; // since Linux Linux 3.5 + + // BPF "macros" and constants + static final int BPF_LD = 0x00; + static final int BPF_W = 0x00; + static final int BPF_ABS = 0x20; + static final int BPF_JMP = 0x05; + static final int BPF_JEQ = 0x10; + static final int BPF_JGE = 0x30; + static final int BPF_JGT = 0x20; + static final int BPF_RET = 0x06; + static final int BPF_K = 0x00; + + static SockFilter BPF_STMT(int code, int k) { + return new SockFilter((short) code, (byte) 0, (byte) 0, k); + } + + static SockFilter BPF_JUMP(int code, int k, int jt, int jf) { + return new SockFilter((short) code, (byte) jt, (byte) jf, k); + } + + static final int SECCOMP_RET_ERRNO = 0x00050000; + static final int SECCOMP_RET_DATA = 0x0000FFFF; + static final int SECCOMP_RET_ALLOW = 0x7FFF0000; + + // some errno constants for error checking/handling + static final int EACCES = 0x0D; + static final int EFAULT = 0x0E; + static final int EINVAL = 0x16; + static final int ENOSYS = 0x26; + + // offsets that our BPF checks + // check with offsetof() when adding a new arch, move to Arch if different. 
+ static final int SECCOMP_DATA_NR_OFFSET = 0x00; + static final int SECCOMP_DATA_ARCH_OFFSET = 0x04; + + record Arch( + int audit, // AUDIT_ARCH_XXX constant from linux/audit.h + int limit, // syscall limit (necessary for blacklisting on amd64, to ban 32-bit syscalls) + int fork, // __NR_fork + int vfork, // __NR_vfork + int execve, // __NR_execve + int execveat, // __NR_execveat + int seccomp // __NR_seccomp + ) {} + + /** supported architectures for seccomp keyed by os.arch */ + private static final Map ARCHITECTURES; + static { + ARCHITECTURES = Map.of( + "amd64", + new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317), + "aarch64", + new Arch(0xC00000B7, 0xFFFFFFFF, 1079, 1071, 221, 281, 277) + ); + } + + private final LinuxCLibrary linuxLibc; + private final Systemd systemd; LinuxNativeAccess(NativeLibraryProvider libraryProvider) { super("Linux", libraryProvider, new PosixConstants(-1L, 9, 1, 8)); + this.linuxLibc = libraryProvider.getLibrary(LinuxCLibrary.class); this.systemd = new Systemd(libraryProvider.getLibrary(SystemdLibrary.class)); } @@ -46,4 +119,197 @@ protected void logMemoryLimitInstructions() { \t{} hard memlock unlimited""", user, user, user); logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect."); } + + /** + * Installs exec system call filtering for Linux. + *
<p>
+ * On Linux, exec system call filtering currently supports the amd64 and aarch64 architectures.
+ * It requires Linux kernel 3.5 or above, with {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER}
+ * compiled into the kernel.
+ * <p>
+ * On Linux, BPF filters are installed using either {@code seccomp(2)} (3.17+) or {@code prctl(2)} (3.5+). {@code seccomp(2)}
+ * is preferred, as it allows filters to be applied to all existing threads in the process; one motivation
+ * here is to protect against bugs in the JVM. Otherwise, the code falls back to the {@code prctl(2)} method,
+ * which will at least protect Elasticsearch application threads.
+ * <p>
+ * Linux BPF filters will return {@code EACCES} (Access Denied) for the following system calls:
+ * <ul>
+ *   <li>{@code fork}</li>
+ *   <li>{@code vfork}</li>
+ *   <li>{@code execve}</li>
+ *   <li>{@code execveat}</li>
+ * </ul>
+ * @see + * * http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt + */ + @Override + public void tryInstallExecSandbox() { + // first be defensive: we can give nice errors this way, at the very least. + // also, some of these security features get backported to old versions, checking kernel version here is a big no-no! + String archId = System.getProperty("os.arch"); + final Arch arch = ARCHITECTURES.get(archId); + if (arch == null) { + throw new UnsupportedOperationException("seccomp unavailable: '" + archId + "' architecture unsupported"); + } + + // try to check system calls really are who they claim + // you never know (e.g. https://chromium.googlesource.com/chromium/src.git/+/master/sandbox/linux/seccomp-bpf/sandbox_bpf.cc#57) + final int bogusArg = 0xf7a46a5c; + + // test seccomp(BOGUS) + long ret = linuxLibc.syscall(arch.seccomp, bogusArg, 0, 0); + if (ret != -1) { + throw new UnsupportedOperationException("seccomp unavailable: seccomp(BOGUS_OPERATION) returned " + ret); + } else { + int errno = libc.errno(); + switch (errno) { + case ENOSYS: + break; // ok + case EINVAL: + break; // ok + default: + throw new UnsupportedOperationException("seccomp(BOGUS_OPERATION): " + libc.strerror(errno)); + } + } + + // test seccomp(VALID, BOGUS) + ret = linuxLibc.syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, bogusArg, 0); + if (ret != -1) { + throw new UnsupportedOperationException("seccomp unavailable: seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG) returned " + ret); + } else { + int errno = libc.errno(); + switch (errno) { + case ENOSYS: + break; // ok + case EINVAL: + break; // ok + default: + throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + libc.strerror(errno)); + } + } + + // test prctl(BOGUS) + ret = linuxLibc.prctl(bogusArg, 0, 0, 0, 0); + if (ret != -1) { + throw new UnsupportedOperationException("seccomp unavailable: prctl(BOGUS_OPTION) returned " + ret); + } else { + int errno = libc.errno(); + switch (errno) { + case ENOSYS: + break; // ok + case EINVAL: + break; // ok + default: + throw new UnsupportedOperationException("prctl(BOGUS_OPTION): " + libc.strerror(errno)); + } + } + + // now just normal defensive checks + + // check for GET_NO_NEW_PRIVS + switch (linuxLibc.prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0)) { + case 0: + break; // not yet set + case 1: + break; // already set by caller + default: + int errno = libc.errno(); + if (errno == EINVAL) { + // friendly error, this will be the typical case for an old kernel + throw new UnsupportedOperationException( + "seccomp unavailable: requires kernel 3.5+ with" + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" + ); + } else { + throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + libc.strerror(errno)); + } + } + // check for SECCOMP + switch (linuxLibc.prctl(PR_GET_SECCOMP, 0, 0, 0, 0)) { + case 0: + break; // not yet set + case 2: + break; // already in filter mode by caller + default: + int errno = libc.errno(); + if (errno == EINVAL) { + throw new UnsupportedOperationException( + "seccomp unavailable: CONFIG_SECCOMP not compiled into kernel," + + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed" + ); + } else { + throw new UnsupportedOperationException("prctl(PR_GET_SECCOMP): " + libc.strerror(errno)); + } + } + // check for SECCOMP_MODE_FILTER + if (linuxLibc.prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 0, 0, 0) != 0) { + int errno = libc.errno(); + switch (errno) { + case EFAULT: + break; // available + case EINVAL: + throw new 
UnsupportedOperationException( + "seccomp unavailable: CONFIG_SECCOMP_FILTER not" + + " compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed" + ); + default: + throw new UnsupportedOperationException("prctl(PR_SET_SECCOMP): " + libc.strerror(errno)); + } + } + + // ok, now set PR_SET_NO_NEW_PRIVS, needed to be able to set a seccomp filter as ordinary user + if (linuxLibc.prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) { + throw new UnsupportedOperationException("prctl(PR_SET_NO_NEW_PRIVS): " + libc.strerror(libc.errno())); + } + + // check it worked + if (linuxLibc.prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) { + throw new UnsupportedOperationException( + "seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " + libc.strerror(libc.errno()) + ); + } + + // BPF installed to check arch, limit, then syscall. + // See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details. + SockFilter insns[] = { + /* 1 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), // + /* 2 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.audit, 0, 7), // if (arch != audit) goto fail; + /* 3 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_NR_OFFSET), // + /* 4 */ BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, arch.limit, 5, 0), // if (syscall > LIMIT) goto fail; + /* 5 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.fork, 4, 0), // if (syscall == FORK) goto fail; + /* 6 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.vfork, 3, 0), // if (syscall == VFORK) goto fail; + /* 7 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execve, 2, 0), // if (syscall == EXECVE) goto fail; + /* 8 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execveat, 1, 0), // if (syscall == EXECVEAT) goto fail; + /* 9 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW), // pass: return OK; + /* 10 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EACCES & SECCOMP_RET_DATA)), // fail: return EACCES; + }; + // seccomp takes a long, so we pass it one explicitly to keep the JNA simple + SockFProg prog = linuxLibc.newSockFProg(insns); + + int method = 1; + // install filter, if this works, after this there is no going back! + // first try it with seccomp(SECCOMP_SET_MODE_FILTER), falling back to prctl() + if (linuxLibc.syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, prog.address()) != 0) { + method = 0; + int errno1 = libc.errno(); + if (logger.isDebugEnabled()) { + logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", libc.strerror(errno1)); + } + if (linuxLibc.prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, prog.address(), 0, 0) != 0) { + int errno2 = libc.errno(); + throw new UnsupportedOperationException( + "seccomp(SECCOMP_SET_MODE_FILTER): " + libc.strerror(errno1) + ", prctl(PR_SET_SECCOMP): " + libc.strerror(errno2) + ); + } + } + + // now check that the filter was really installed, we should be in filter mode. + if (linuxLibc.prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) { + throw new UnsupportedOperationException( + "seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " + libc.strerror(libc.errno()) + ); + } + + logger.debug("Linux seccomp filter installation successful, threads: [{}]", method == 1 ? "all" : "app"); + execSandboxState = method == 1 ? 
ExecSandboxState.ALL_THREADS : ExecSandboxState.EXISTING_THREADS; + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java index 0388c66d3962f..c53b7ba6ac2f0 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java @@ -8,12 +8,30 @@ package org.elasticsearch.nativeaccess; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary.RLimit; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; class MacNativeAccess extends PosixNativeAccess { + /** The only supported flag... */ + static final int SANDBOX_NAMED = 1; + /** Allow everything except process fork and execution */ + static final String SANDBOX_RULES = "(version 1) (allow default) (deny process-fork) (deny process-exec)"; + + private final MacCLibrary macLibc; + MacNativeAccess(NativeLibraryProvider libraryProvider) { super("MacOS", libraryProvider, new PosixConstants(9223372036854775807L, 5, 1, 6)); + this.macLibc = libraryProvider.getLibrary(MacCLibrary.class); } @Override @@ -25,4 +43,69 @@ protected long getMaxThreads() { protected void logMemoryLimitInstructions() { // we don't have instructions for macos } + + /** + * Installs exec system call filtering on MacOS. + *
<p>
+ * Two different methods of filtering are used. Since MacOS is BSD-based, process creation
+ * is first restricted with {@code setrlimit(RLIMIT_NPROC)}.
+ * <p>
+ * Additionally, on Mac OS X Leopard or above, a custom {@code sandbox(7)} ("Seatbelt") profile is installed that
+ * denies the following operations:
+ * <ul>
+ *   <li>{@code process-fork}</li>
+ *   <li>{@code process-exec}</li>
+ * </ul>
+ * @see + * * https://reverse.put.as/wp-content/uploads/2011/06/The-Apple-Sandbox-BHDC2011-Paper.pdf + */ + @Override + public void tryInstallExecSandbox() { + initBsdSandbox(); + initMacSandbox(); + execSandboxState = ExecSandboxState.ALL_THREADS; + } + + @SuppressForbidden(reason = "Java tmp dir is ok") + private static Path createTempRulesFile() throws IOException { + return Files.createTempFile("es", "sb"); + } + + private void initMacSandbox() { + // write rules to a temporary file, which will be passed to sandbox_init() + Path rules; + try { + rules = createTempRulesFile(); + Files.write(rules, Collections.singleton(SANDBOX_RULES)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + try { + var errorRef = macLibc.newErrorReference(); + int ret = macLibc.sandbox_init(rules.toAbsolutePath().toString(), SANDBOX_NAMED, errorRef); + // if sandbox_init() fails, add the message from the OS (e.g. syntax error) and free the buffer + if (ret != 0) { + RuntimeException e = new UnsupportedOperationException("sandbox_init(): " + errorRef.toString()); + macLibc.sandbox_free_error(errorRef); + throw e; + } + logger.debug("OS X seatbelt initialization successful"); + } finally { + IOUtils.deleteFilesIgnoringExceptions(rules); + } + } + + private void initBsdSandbox() { + RLimit limit = libc.newRLimit(); + limit.rlim_cur(0); + limit.rlim_max(0); + // not a standard limit, means something different on linux, etc! + final int RLIMIT_NPROC = 7; + if (libc.setrlimit(RLIMIT_NPROC, limit) != 0) { + throw new UnsupportedOperationException("RLIMIT_NPROC unavailable: " + libc.strerror(libc.errno())); + } + + logger.debug("BSD RLIMIT_NPROC initialization successful"); + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java index 7f91d0425af47..61935ac93c5a3 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java @@ -44,6 +44,16 @@ static NativeAccess instance() { */ boolean isMemoryLocked(); + /** + * Attempts to install a system call filter to block process execution. + */ + void tryInstallExecSandbox(); + + /** + * Return whether installing the exec system call filters was successful, and to what degree. + */ + ExecSandboxState getExecSandboxState(); + Systemd systemd(); /** @@ -71,4 +81,16 @@ default WindowsFunctions getWindowsFunctions() { * @return the buffer */ CloseableByteBuffer newBuffer(int len); + + /** + * Possible stats for execution filtering. 
+ */ + enum ExecSandboxState { + /** No execution filtering */ + NONE, + /** Exec is blocked for threads that were already created */ + EXISTING_THREADS, + /** Exec is blocked for all current and future threads */ + ALL_THREADS + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java index c0eed4a9ce09b..fc186cb03b0d9 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java @@ -41,6 +41,16 @@ public boolean isMemoryLocked() { return false; } + @Override + public void tryInstallExecSandbox() { + logger.warn("Cannot install system call filter because native access is not available"); + } + + @Override + public ExecSandboxState getExecSandboxState() { + return ExecSandboxState.NONE; + } + @Override public Systemd systemd() { logger.warn("Cannot get systemd access because native access is not available"); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java index 843cc73fbed02..a9ccd15330595 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java @@ -27,6 +27,16 @@ class WindowsNativeAccess extends AbstractNativeAccess { public static final int PAGE_GUARD = 0x0100; public static final int MEM_COMMIT = 0x1000; + /** + * Constant for JOBOBJECT_BASIC_LIMIT_INFORMATION in Query/Set InformationJobObject + */ + private static final int JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS = 2; + + /** + * Constant for LimitFlags, indicating a process limit has been set + */ + private static final int JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8; + private final Kernel32Library kernel; private final WindowsFunctions windowsFunctions; @@ -68,6 +78,47 @@ public void tryLockMemory() { // note: no need to close the process handle because GetCurrentProcess returns a pseudo handle } + /** + * Install exec system call filtering on Windows. + *
<p>
+ * Process creation is restricted with {@code SetInformationJobObject/ActiveProcessLimit}.
+ * <p>
+ * Note: This is not intended as a real sandbox. It is another level of security, mostly intended to annoy + * security researchers and make their lives more difficult in achieving "remote execution" exploits. + */ + @Override + public void tryInstallExecSandbox() { + // create a new Job + Handle job = kernel.CreateJobObjectW(); + if (job == null) { + throw new UnsupportedOperationException("CreateJobObject: " + kernel.GetLastError()); + } + + try { + // retrieve the current basic limits of the job + int clazz = JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS; + var info = kernel.newJobObjectBasicLimitInformation(); + if (kernel.QueryInformationJobObject(job, clazz, info) == false) { + throw new UnsupportedOperationException("QueryInformationJobObject: " + kernel.GetLastError()); + } + // modify the number of active processes to be 1 (exactly the one process we will add to the job). + info.setActiveProcessLimit(1); + info.setLimitFlags(JOB_OBJECT_LIMIT_ACTIVE_PROCESS); + if (kernel.SetInformationJobObject(job, clazz, info) == false) { + throw new UnsupportedOperationException("SetInformationJobObject: " + kernel.GetLastError()); + } + // assign ourselves to the job + if (kernel.AssignProcessToJobObject(job, kernel.GetCurrentProcess()) == false) { + throw new UnsupportedOperationException("AssignProcessToJobObject: " + kernel.GetLastError()); + } + } finally { + kernel.CloseHandle(job); + } + + execSandboxState = ExecSandboxState.ALL_THREADS; + logger.debug("Windows ActiveProcessLimit initialization successful"); + } + @Override public ProcessLimits getProcessLimits() { return new ProcessLimits(ProcessLimits.UNKNOWN, ProcessLimits.UNKNOWN, ProcessLimits.UNKNOWN); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java index 43337f4532bed..dd786b56087e2 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java @@ -101,4 +101,65 @@ interface MemoryBasicInformation { * @see SetConsoleCtrlHandler docs */ boolean SetConsoleCtrlHandler(ConsoleCtrlHandler handler, boolean add); + + /** + * Creates or opens a new job object + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx + * Note: the two params to this are omitted because all implementations pass null for them both + * + * @return job handle if the function succeeds + */ + Handle CreateJobObjectW(); + + /** + * Associates a process with an existing job + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx + * + * @param job job handle + * @param process process handle + * @return true if the function succeeds + */ + boolean AssignProcessToJobObject(Handle job, Handle process); + + /** + * Basic limit information for a job object + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx + */ + interface JobObjectBasicLimitInformation { + void setLimitFlags(int v); + + void setActiveProcessLimit(int v); + } + + JobObjectBasicLimitInformation newJobObjectBasicLimitInformation(); + + /** + * Get job limit and state information + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx + * Note: The infoLength parameter is omitted because implementions handle passing it + * Note: The returnLength parameter is omitted because all implementations pass null + * + * @param 
job job handle + * @param infoClass information class constant + * @param info pointer to information structure + * @return true if the function succeeds + */ + boolean QueryInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info); + + /** + * Set job limit and state information + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx + * Note: The infoLength parameter is omitted because implementions handle passing it + * + * @param job job handle + * @param infoClass information class constant + * @param info pointer to information structure + * @return true if the function succeeds + */ + boolean SetInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info); } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LinuxCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LinuxCLibrary.java new file mode 100644 index 0000000000000..2a7b10ff3588f --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LinuxCLibrary.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +public non-sealed interface LinuxCLibrary extends NativeLibrary { + + /** + * Corresponds to struct sock_filter + * @param code insn + * @param jt number of insn to jump (skip) if true + * @param jf number of insn to jump (skip) if false + * @param k additional data + */ + record SockFilter(short code, byte jt, byte jf, int k) {} + + interface SockFProg { + long address(); + } + + SockFProg newSockFProg(SockFilter filters[]); + + /** + * maps to prctl(2) + */ + int prctl(int option, long arg2, long arg3, long arg4, long arg5); + + /** + * used to call seccomp(2), its too new... + * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing + */ + long syscall(long number, int operation, int flags, long address); +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/MacCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/MacCLibrary.java new file mode 100644 index 0000000000000..b2b2db9c71c90 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/MacCLibrary.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +public non-sealed interface MacCLibrary extends NativeLibrary { + interface ErrorReference {} + + ErrorReference newErrorReference(); + + /** + * maps to sandbox_init(3), since Leopard + */ + int sandbox_init(String profile, long flags, ErrorReference errorbuf); + + /** + * releases memory when an error occurs during initialization (e.g. 
syntax bug) + */ + void sandbox_free_error(ErrorReference errorbuf); +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java index d8098a78935b8..faa0e861dc63f 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java @@ -9,4 +9,5 @@ package org.elasticsearch.nativeaccess.lib; /** A marker interface for libraries that can be loaded by {@link org.elasticsearch.nativeaccess.lib.NativeLibraryProvider} */ -public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, Kernel32Library, SystemdLibrary, VectorLibrary, ZstdLibrary {} +public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, SystemdLibrary, + VectorLibrary, ZstdLibrary {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java index 96e2a0d0e1cdf..d8db5fa070126 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java @@ -26,6 +26,10 @@ interface RLimit { long rlim_cur(); long rlim_max(); + + void rlim_cur(long v); + + void rlim_max(long v); } /** @@ -41,6 +45,8 @@ interface RLimit { */ int getrlimit(int resource, RLimit rlimit); + int setrlimit(int resource, RLimit rlimit); + /** * Lock all the current process's virtual address space into RAM. * @param flags flags determining how memory will be locked @@ -49,6 +55,22 @@ interface RLimit { */ int mlockall(int flags); + interface FStore { + void set_flags(int flags); /* IN: flags word */ + + void set_posmode(int posmode); /* IN: indicates offset field */ + + void set_offset(long offset); /* IN: start of the region */ + + void set_length(long length); /* IN: size of the region */ + + long bytesalloc(); /* OUT: number of bytes allocated */ + } + + FStore newFStore(); + + int fcntl(int fd, int cmd, FStore fst); + /** * Return a string description for an error. 
* diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java index bbfd26bd061d0..f5eb5238dad93 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java @@ -72,6 +72,22 @@ class JdkKernel32Library implements Kernel32Library { "handle", ConsoleCtrlHandler_handle$fd ); + private static final MethodHandle CreateJobObjectW$mh = downcallHandleWithError( + "CreateJobObjectW", + FunctionDescriptor.of(ADDRESS, ADDRESS, ADDRESS) + ); + private static final MethodHandle AssignProcessToJobObject$mh = downcallHandleWithError( + "AssignProcessToJobObject", + FunctionDescriptor.of(JAVA_BOOLEAN, ADDRESS, ADDRESS) + ); + private static final MethodHandle QueryInformationJobObject$mh = downcallHandleWithError( + "QueryInformationJobObject", + FunctionDescriptor.of(JAVA_BOOLEAN, ADDRESS, JAVA_INT, ADDRESS, JAVA_INT, ADDRESS) + ); + private static final MethodHandle SetInformationJobObject$mh = downcallHandleWithError( + "SetInformationJobObject", + FunctionDescriptor.of(JAVA_BOOLEAN, ADDRESS, JAVA_INT, ADDRESS, JAVA_INT) + ); private static MethodHandle downcallHandleWithError(String function, FunctionDescriptor functionDescriptor) { return downcallHandle(function, functionDescriptor, CAPTURE_GETLASTERROR_OPTION); @@ -146,6 +162,37 @@ public long Type() { } } + static class JdkJobObjectBasicLimitInformation implements JobObjectBasicLimitInformation { + private static final MemoryLayout layout = MemoryLayout.structLayout( + paddingLayout(16), + JAVA_INT, + paddingLayout(20), + JAVA_INT, + paddingLayout(20) + ).withByteAlignment(8); + + private static final VarHandle LimitFlags$vh = varHandleWithoutOffset(layout, groupElement(1)); + private static final VarHandle ActiveProcessLimit$vh = varHandleWithoutOffset(layout, groupElement(3)); + + private final MemorySegment segment; + + JdkJobObjectBasicLimitInformation() { + var arena = Arena.ofAuto(); + this.segment = arena.allocate(layout); + segment.fill((byte) 0); + } + + @Override + public void setLimitFlags(int v) { + LimitFlags$vh.set(segment, v); + } + + @Override + public void setActiveProcessLimit(int v) { + ActiveProcessLimit$vh.set(segment, v); + } + } + private final MemorySegment lastErrorState; JdkKernel32Library() { @@ -262,4 +309,73 @@ public boolean SetConsoleCtrlHandler(ConsoleCtrlHandler handler, boolean add) { throw new AssertionError(t); } } + + @Override + public Handle CreateJobObjectW() { + try { + return new JdkHandle((MemorySegment) CreateJobObjectW$mh.invokeExact(lastErrorState, MemorySegment.NULL, MemorySegment.NULL)); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public boolean AssignProcessToJobObject(Handle job, Handle process) { + assert job instanceof JdkHandle; + assert process instanceof JdkHandle; + var jdkJob = (JdkHandle) job; + var jdkProcess = (JdkHandle) process; + + try { + return (boolean) AssignProcessToJobObject$mh.invokeExact(lastErrorState, jdkJob.address, jdkProcess.address); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public JobObjectBasicLimitInformation newJobObjectBasicLimitInformation() { + return new JdkJobObjectBasicLimitInformation(); + } + + @Override + public boolean QueryInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info) { + assert job 
instanceof JdkHandle; + assert info instanceof JdkJobObjectBasicLimitInformation; + var jdkJob = (JdkHandle) job; + var jdkInfo = (JdkJobObjectBasicLimitInformation) info; + + try { + return (boolean) QueryInformationJobObject$mh.invokeExact( + lastErrorState, + jdkJob.address, + infoClass, + jdkInfo.segment, + (int) jdkInfo.segment.byteSize(), + MemorySegment.NULL + ); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public boolean SetInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info) { + assert job instanceof JdkHandle; + assert info instanceof JdkJobObjectBasicLimitInformation; + var jdkJob = (JdkHandle) job; + var jdkInfo = (JdkJobObjectBasicLimitInformation) info; + + try { + return (boolean) SetInformationJobObject$mh.invokeExact( + lastErrorState, + jdkJob.address, + infoClass, + jdkInfo.segment, + (int) jdkInfo.segment.byteSize() + ); + } catch (Throwable t) { + throw new AssertionError(t); + } + } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkLinuxCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkLinuxCLibrary.java new file mode 100644 index 0000000000000..700941e7e1db0 --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkLinuxCLibrary.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; + +import java.lang.foreign.Arena; +import java.lang.foreign.FunctionDescriptor; +import java.lang.foreign.Linker; +import java.lang.foreign.MemoryLayout; +import java.lang.foreign.MemorySegment; +import java.lang.invoke.MethodHandle; + +import static java.lang.foreign.MemoryLayout.paddingLayout; +import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_BYTE; +import static java.lang.foreign.ValueLayout.JAVA_INT; +import static java.lang.foreign.ValueLayout.JAVA_LONG; +import static java.lang.foreign.ValueLayout.JAVA_SHORT; +import static org.elasticsearch.nativeaccess.jdk.JdkPosixCLibrary.CAPTURE_ERRNO_OPTION; +import static org.elasticsearch.nativeaccess.jdk.JdkPosixCLibrary.downcallHandleWithErrno; +import static org.elasticsearch.nativeaccess.jdk.JdkPosixCLibrary.errnoState; +import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; + +class JdkLinuxCLibrary implements LinuxCLibrary { + private static final MethodHandle prctl$mh; + static { + try { + prctl$mh = downcallHandleWithErrno( + "prctl", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_LONG, JAVA_LONG, JAVA_LONG, JAVA_LONG) + ); + } catch (UnsatisfiedLinkError e) { + throw new UnsupportedOperationException( + "seccomp unavailable: could not link methods. 
requires kernel 3.5+ " + + "with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" + ); + } + } + private static final MethodHandle syscall$mh = downcallHandle( + "syscall", + FunctionDescriptor.of(JAVA_LONG, JAVA_LONG, JAVA_INT, JAVA_INT, JAVA_LONG), + CAPTURE_ERRNO_OPTION, + Linker.Option.firstVariadicArg(1) + ); + + private static class JdkSockFProg implements SockFProg { + private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_SHORT, paddingLayout(6), ADDRESS); + + private final MemorySegment segment; + + JdkSockFProg(SockFilter filters[]) { + Arena arena = Arena.ofAuto(); + this.segment = arena.allocate(layout); + var instSegment = arena.allocate(filters.length * 8L); + segment.set(JAVA_SHORT, 0, (short) filters.length); + segment.set(ADDRESS, 8, instSegment); + + int offset = 0; + for (SockFilter f : filters) { + instSegment.set(JAVA_SHORT, offset, f.code()); + instSegment.set(JAVA_BYTE, offset + 2, f.jt()); + instSegment.set(JAVA_BYTE, offset + 3, f.jf()); + instSegment.set(JAVA_INT, offset + 4, f.k()); + offset += 8; + } + } + + @Override + public long address() { + return segment.address(); + } + } + + @Override + public SockFProg newSockFProg(SockFilter[] filters) { + return new JdkSockFProg(filters); + } + + @Override + public int prctl(int option, long arg2, long arg3, long arg4, long arg5) { + try { + return (int) prctl$mh.invokeExact(errnoState, option, arg2, arg3, arg4, arg5); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public long syscall(long number, int operation, int flags, long address) { + try { + return (long) syscall$mh.invokeExact(errnoState, number, operation, flags, address); + } catch (Throwable t) { + throw new AssertionError(t); + } + } +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkMacCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkMacCLibrary.java new file mode 100644 index 0000000000000..b946ca3ca4353 --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkMacCLibrary.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.nativeaccess.lib.MacCLibrary; + +import java.lang.foreign.Arena; +import java.lang.foreign.FunctionDescriptor; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.lang.invoke.MethodHandle; + +import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_INT; +import static java.lang.foreign.ValueLayout.JAVA_LONG; +import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; + +class JdkMacCLibrary implements MacCLibrary { + + private static final MethodHandle sandbox_init$mh = downcallHandle( + "sandbox_init", + FunctionDescriptor.of(JAVA_INT, ADDRESS, JAVA_LONG, ADDRESS) + ); + private static final MethodHandle sandbox_free_error$mh = downcallHandle("sandbox_free_error", FunctionDescriptor.ofVoid(ADDRESS)); + + private static class JdkErrorReference implements ErrorReference { + final Arena arena = Arena.ofConfined(); + final MemorySegment segment = arena.allocate(ValueLayout.ADDRESS); + + MemorySegment deref() { + return segment.get(ADDRESS, 0); + } + + @Override + public String toString() { + return deref().reinterpret(Long.MAX_VALUE).getUtf8String(0); + } + } + + @Override + public ErrorReference newErrorReference() { + return new JdkErrorReference(); + } + + @Override + public int sandbox_init(String profile, long flags, ErrorReference errorbuf) { + assert errorbuf instanceof JdkErrorReference; + var jdkErrorbuf = (JdkErrorReference) errorbuf; + try (Arena arena = Arena.ofConfined()) { + MemorySegment nativeProfile = MemorySegmentUtil.allocateString(arena, profile); + return (int) sandbox_init$mh.invokeExact(nativeProfile, flags, jdkErrorbuf.segment); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public void sandbox_free_error(ErrorReference errorbuf) { + assert errorbuf instanceof JdkErrorReference; + var jdkErrorbuf = (JdkErrorReference) errorbuf; + try { + sandbox_free_error$mh.invokeExact(jdkErrorbuf.deref()); + } catch (Throwable t) { + throw new AssertionError(t); + } + } +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java index d76170a55284c..cbd43a394379b 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java @@ -10,6 +10,8 @@ import org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.Kernel32Library; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import org.elasticsearch.nativeaccess.lib.SystemdLibrary; @@ -28,6 +30,10 @@ public JdkNativeLibraryProvider() { JdkJavaLibrary::new, PosixCLibrary.class, JdkPosixCLibrary::new, + LinuxCLibrary.class, + JdkLinuxCLibrary::new, + MacCLibrary.class, + JdkMacCLibrary::new, Kernel32Library.class, JdkKernel32Library::new, SystemdLibrary.class, diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java index 43ec9425ccfaa..1a65225873c1d 100644 --- 
a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java @@ -43,7 +43,12 @@ class JdkPosixCLibrary implements PosixCLibrary { "getrlimit", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS) ); + private static final MethodHandle setrlimit$mh = downcallHandleWithErrno( + "setrlimit", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS) + ); private static final MethodHandle mlockall$mh = downcallHandleWithErrno("mlockall", FunctionDescriptor.of(JAVA_INT, JAVA_INT)); + private static final MethodHandle fcntl$mh = downcallHandle("fcntl", FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_INT, ADDRESS)); static final MemorySegment errnoState = Arena.ofAuto().allocate(CAPTURE_ERRNO_LAYOUT); @@ -91,6 +96,17 @@ public int getrlimit(int resource, RLimit rlimit) { } } + @Override + public int setrlimit(int resource, RLimit rlimit) { + assert rlimit instanceof JdkRLimit; + var jdkRlimit = (JdkRLimit) rlimit; + try { + return (int) setrlimit$mh.invokeExact(errnoState, resource, jdkRlimit.segment); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + @Override public int mlockall(int flags) { try { @@ -100,6 +116,22 @@ public int mlockall(int flags) { } } + @Override + public FStore newFStore() { + return new JdkFStore(); + } + + @Override + public int fcntl(int fd, int cmd, FStore fst) { + assert fst instanceof JdkFStore; + var jdkFst = (JdkFStore) fst; + try { + return (int) fcntl$mh.invokeExact(errnoState, fd, cmd, jdkFst.segment); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + static class JdkRLimit implements RLimit { private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_LONG, JAVA_LONG); private static final VarHandle rlim_cur$vh = varHandleWithoutOffset(layout, groupElement(0)); @@ -122,9 +154,60 @@ public long rlim_max() { return (long) rlim_max$vh.get(segment); } + @Override + public void rlim_cur(long v) { + rlim_cur$vh.set(segment, v); + } + + @Override + public void rlim_max(long v) { + rlim_max$vh.set(segment, v); + } + @Override public String toString() { return "JdkRLimit[rlim_cur=" + rlim_cur() + ", rlim_max=" + rlim_max() + "]"; } } + + private static class JdkFStore implements FStore { + private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_INT, JAVA_INT, JAVA_LONG, JAVA_LONG, JAVA_LONG); + private static final VarHandle st_flags$vh = layout.varHandle(groupElement(0)); + private static final VarHandle st_posmode$vh = layout.varHandle(groupElement(1)); + private static final VarHandle st_offset$vh = layout.varHandle(groupElement(2)); + private static final VarHandle st_length$vh = layout.varHandle(groupElement(3)); + private static final VarHandle st_bytesalloc$vh = layout.varHandle(groupElement(4)); + + private final MemorySegment segment; + + JdkFStore() { + var arena = Arena.ofAuto(); + this.segment = arena.allocate(layout); + } + + @Override + public void set_flags(int flags) { + st_flags$vh.set(segment, flags); + } + + @Override + public void set_posmode(int posmode) { + st_posmode$vh.set(segment, posmode); + } + + @Override + public void set_offset(long offset) { + st_offset$vh.set(segment, offset); + } + + @Override + public void set_length(long length) { + st_length$vh.set(segment, length); + } + + @Override + public long bytesalloc() { + return (long) st_bytesalloc$vh.get(segment); + } + } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SystemCallFilterTests.java
b/libs/native/src/test/java/org/elasticsearch/nativeaccess/SystemCallFilterTests.java similarity index 84% rename from qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SystemCallFilterTests.java rename to libs/native/src/test/java/org/elasticsearch/nativeaccess/SystemCallFilterTests.java index c62522880869b..d4bac13990898 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SystemCallFilterTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/SystemCallFilterTests.java @@ -6,12 +6,16 @@ * Side Public License, v 1. */ -package org.elasticsearch.bootstrap; +package org.elasticsearch.nativeaccess; import org.apache.lucene.util.Constants; import org.elasticsearch.test.ESTestCase; +import static org.apache.lucene.tests.util.LuceneTestCase.assumeTrue; +import static org.junit.Assert.fail; + /** Simple tests system call filter is working. */ +@ESTestCase.WithoutSecurityManager public class SystemCallFilterTests extends ESTestCase { /** command to try to run in tests */ @@ -20,15 +24,18 @@ public class SystemCallFilterTests extends ESTestCase { @Override public void setUp() throws Exception { super.setUp(); - assumeTrue("requires system call filter installation", Natives.isSystemCallFilterInstalled()); + assumeTrue( + "requires system call filter installation", + NativeAccess.instance().getExecSandboxState() != NativeAccess.ExecSandboxState.NONE + ); // otherwise security manager will block the execution, no fun assumeTrue("cannot test with security manager enabled", System.getSecurityManager() == null); // otherwise, since we don't have TSYNC support, rules are not applied to the test thread // (randomizedrunner class initialization happens in its own thread, after the test thread is created) // instead we just forcefully run it for the test thread here. 
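For context on the API swap in this test: the deleted JNANatives flags are replaced by a single state enum on NativeAccess. Below is a minimal caller-side sketch, using only names that appear in this diff (getExecSandboxState, tryInstallExecSandbox, and the NONE and ALL_THREADS states); the name of any intermediate state covering only application threads is not shown in this diff and is assumed.

```java
import org.elasticsearch.nativeaccess.NativeAccess;

class ExecSandboxSketch {
    // A sketch, not the PR's code: install the exec sandbox once, then let
    // callers branch on how far it reaches.
    static void installIfMissing() {
        NativeAccess nativeAccess = NativeAccess.instance();
        if (nativeAccess.getExecSandboxState() == NativeAccess.ExecSandboxState.NONE) {
            // On Linux this bottoms out in the seccomp/prctl handles of
            // JdkLinuxCLibrary; on macOS in sandbox_init; on Windows in the
            // job object calls added to JdkKernel32Library.
            nativeAccess.tryInstallExecSandbox();
        }
    }
}
```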
- if (JNANatives.LOCAL_SYSTEM_CALL_FILTER_ALL == false) { + if (NativeAccess.instance().getExecSandboxState() != NativeAccess.ExecSandboxState.ALL_THREADS) { try { - SystemCallFilter.init(createTempDir()); + NativeAccess.instance().tryInstallExecSandbox(); } catch (Exception e) { throw new RuntimeException("unable to forcefully apply system call filter to test thread", e); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 5239e96856b7f..13394a2a0c7cc 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -318,22 +318,19 @@ public void requestReschedule() { } private void cleanDatabases() { - long expiredDatabases = state.getDatabases() + List<Map.Entry<String, Metadata>> expiredDatabases = state.getDatabases() .entrySet() .stream() .filter(e -> e.getValue().isValid(clusterService.state().metadata().settings()) == false) - .peek(e -> { - String name = e.getKey(); - Metadata meta = e.getValue(); - deleteOldChunks(name, meta.lastChunk() + 1); - state = state.put( - name, - new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1) - ); - updateTaskState(); - }) - .count(); - stats = stats.expiredDatabases((int) expiredDatabases); + .toList(); + expiredDatabases.forEach(e -> { + String name = e.getKey(); + Metadata meta = e.getValue(); + deleteOldChunks(name, meta.lastChunk() + 1); + state = state.put(name, new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1)); + updateTaskState(); + }); + stats = stats.expiredDatabases(expiredDatabases.size()); } @Override diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 4834c581e9386..6a83fe69473f7 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -30,11 +30,17 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStats; import org.elasticsearch.node.Node; +import org.elasticsearch.persistent.PersistentTaskResponse; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; @@ -49,6 +55,9 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.temporal.ChronoUnit; import java.util.HashMap; import
java.util.Iterator; import java.util.List; @@ -63,6 +72,8 @@ import static org.elasticsearch.ingest.geoip.GeoIpDownloader.MAX_CHUNK_SIZE; import static org.elasticsearch.tasks.TaskId.EMPTY_TASK_ID; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; @@ -76,8 +87,9 @@ public class GeoIpDownloaderTests extends ESTestCase { private GeoIpDownloader geoIpDownloader; @Before - public void setup() { + public void setup() throws IOException { httpClient = mock(HttpClient.class); + when(httpClient.getBytes(anyString())).thenReturn("[]".getBytes(StandardCharsets.UTF_8)); clusterService = mock(ClusterService.class); threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); when(clusterService.getClusterSettings()).thenReturn( @@ -109,7 +121,13 @@ public void setup() { () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), () -> true - ); + ) { + { + GeoIpTaskParams geoIpTaskParams = mock(GeoIpTaskParams.class); + when(geoIpTaskParams.getWriteableName()).thenReturn(GeoIpDownloader.GEOIP_DOWNLOADER); + init(new PersistentTasksService(clusterService, threadPool, client), null, null, 0); + } + }; } @After @@ -541,6 +559,78 @@ public void testUpdateDatabasesIndexNotReady() { verifyNoInteractions(httpClient); } + public void testThatRunDownloaderDeletesExpiredDatabases() { + /* + * This test puts some expired databases and some non-expired ones into the GeoIpTaskState, and then calls runDownloader(), making + * sure that the expired databases have been deleted. 
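A side note on the cleanDatabases() rewrite shown above: Stream::peek only runs its action when elements are actually traversed, and Stream.count() is permitted to skip traversal entirely when the result is computable from the source, a caveat the Stream.count() javadoc calls out. The filter in the original pipeline happens to force traversal, but the pattern is fragile, which is what the explicit toList()/forEach rewrite removes. A self-contained illustration of the bug class:

```java
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

class PeekPitfall {
    public static void main(String[] args) {
        AtomicInteger sideEffects = new AtomicInteger();
        long count = List.of("a", "b", "c").stream()
            .peek(s -> sideEffects.incrementAndGet()) // not guaranteed to run
            .count();
        // On JDK 9 and later this typically prints "count=3 sideEffects=0":
        // the pipeline size is known up front, so count() never invokes peek().
        System.out.println("count=" + count + " sideEffects=" + sideEffects.get());
    }
}
```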
+ */ + AtomicInteger updatePersistentTaskStateCount = new AtomicInteger(0); + AtomicInteger deleteCount = new AtomicInteger(0); + int expiredDatabasesCount = randomIntBetween(1, 100); + int unexpiredDatabasesCount = randomIntBetween(0, 100); + Map<String, GeoIpTaskState.Metadata> databases = new HashMap<>(); + for (int i = 0; i < expiredDatabasesCount; i++) { + databases.put("expiredDatabase" + i, newGeoIpTaskStateMetadata(true)); + } + for (int i = 0; i < unexpiredDatabasesCount; i++) { + databases.put("unexpiredDatabase" + i, newGeoIpTaskStateMetadata(false)); + } + GeoIpTaskState geoIpTaskState = new GeoIpTaskState(databases); + geoIpDownloader.setState(geoIpTaskState); + client.addHandler( + UpdatePersistentTaskStatusAction.INSTANCE, + (UpdatePersistentTaskStatusAction.Request request, ActionListener<PersistentTaskResponse> taskResponseListener) -> { + PersistentTasksCustomMetadata.Assignment assignment = mock(PersistentTasksCustomMetadata.Assignment.class); + PersistentTasksCustomMetadata.PersistentTask<GeoIpTaskParams> persistentTask = new PersistentTasksCustomMetadata.PersistentTask<>( + GeoIpDownloader.GEOIP_DOWNLOADER, + GeoIpDownloader.GEOIP_DOWNLOADER, + new GeoIpTaskParams(), + request.getAllocationId(), + assignment + ); + updatePersistentTaskStateCount.incrementAndGet(); + taskResponseListener.onResponse(new PersistentTaskResponse(new PersistentTask<>(persistentTask, request.getState()))); + } + ); + client.addHandler( + DeleteByQueryAction.INSTANCE, + (DeleteByQueryRequest request, ActionListener<BulkByScrollResponse> flushResponseActionListener) -> { + deleteCount.incrementAndGet(); + } + ); + geoIpDownloader.runDownloader(); + assertThat(geoIpDownloader.getStatus().getExpiredDatabases(), equalTo(expiredDatabasesCount)); + for (int i = 0; i < expiredDatabasesCount; i++) { + // This currently fails because we subtract one millisecond from the lastChecked time + // assertThat(geoIpDownloader.state.getDatabases().get("expiredDatabase" + i).lastCheck(), equalTo(-1L)); + } + for (int i = 0; i < unexpiredDatabasesCount; i++) { + assertThat( + geoIpDownloader.state.getDatabases().get("unexpiredDatabase" + i).lastCheck(), + greaterThanOrEqualTo(Instant.now().minus(30, ChronoUnit.DAYS).toEpochMilli()) + ); + } + assertThat(deleteCount.get(), equalTo(expiredDatabasesCount)); + assertThat(updatePersistentTaskStateCount.get(), equalTo(expiredDatabasesCount)); + geoIpDownloader.runDownloader(); + /* + * The following two lines assert current behavior that might not be desirable -- we continue to delete expired databases every + * time that runDownloader runs. This seems unnecessary. + */
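The 31-to-100-day and 0-to-29-day bounds used by newGeoIpTaskStateMetadata below bracket a 30-day validity window. A hedged restatement of that rule follows; the authoritative check is GeoIpTaskState.Metadata#isValid, which also consults cluster settings.

```java
import java.time.Duration;
import java.time.Instant;

class ExpiryRuleSketch {
    // Assumes the default 30-day window implied by the test's bounds; the
    // real cutoff is configurable and lives in GeoIpTaskState.Metadata#isValid.
    static boolean isExpired(long lastCheckEpochMilli) {
        Instant cutoff = Instant.now().minus(Duration.ofDays(30));
        return Instant.ofEpochMilli(lastCheckEpochMilli).isBefore(cutoff);
    }
}
```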
+ assertThat(deleteCount.get(), equalTo(expiredDatabasesCount * 2)); + assertThat(updatePersistentTaskStateCount.get(), equalTo(expiredDatabasesCount * 2)); + } + + private GeoIpTaskState.Metadata newGeoIpTaskStateMetadata(boolean expired) { + Instant lastChecked; + if (expired) { + lastChecked = Instant.now().minus(randomIntBetween(31, 100), ChronoUnit.DAYS); + } else { + lastChecked = Instant.now().minus(randomIntBetween(0, 29), ChronoUnit.DAYS); + } + return new GeoIpTaskState.Metadata(0, 0, 0, randomAlphaOfLength(20), lastChecked.toEpochMilli()); + } + private static class MockClient extends NoOpClient { private final Map<ActionType<?>, BiConsumer<ActionRequest, ActionListener<?>>> handlers = new HashMap<>(); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index d53c379a37644..72b48c5903629 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -443,4 +444,19 @@ protected void doClose() { } super.doClose(); } + + @Override + public String getAnalysisFailureExtraDetail() { + return Strings.format( + """ + Elasticsearch observed the storage system underneath this repository behaved incorrectly which indicates it is not \ + suitable for use with Elasticsearch snapshots. Typically this happens when using storage other than AWS S3 which \ + incorrectly claims to be S3-compatible. If so, please report this incompatibility to your storage supplier. Do not report \ + Elasticsearch issues involving storage systems which claim to be S3-compatible unless you can demonstrate that the same \ + issue exists when using a genuine AWS S3 repository.
See [%s] for further information about repository analysis, and [%s] \ + for further information about support for S3-compatible repository implementations.""", + ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS, + ReferenceDocs.S3_COMPATIBLE_REPOSITORIES + ); + } } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index fcb0e82505dac..4bbc791e5fe21 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -11,6 +11,7 @@ import com.amazonaws.services.s3.AbstractAmazonS3; import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -28,6 +29,7 @@ import java.util.Map; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -152,4 +154,24 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { ); } + public void testAnalysisFailureDetail() { + try ( + S3Repository s3repo = createS3Repo( + new RepositoryMetadata("dummy-repo", "mock", Settings.builder().put(S3Repository.BUCKET_SETTING.getKey(), "bucket").build()) + ) + ) { + assertThat( + s3repo.getAnalysisFailureExtraDetail(), + allOf( + containsString("storage system underneath this repository behaved incorrectly"), + containsString("incorrectly claims to be S3-compatible"), + containsString("report this incompatibility to your storage supplier"), + containsString("unless you can demonstrate that the same issue exists when using a genuine AWS S3 repository"), + containsString(ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS.toString()), + containsString(ReferenceDocs.S3_COMPATIBLE_REPOSITORIES.toString()) + ) + ); + } + } + } diff --git a/muted-tests.yml b/muted-tests.yml index ccbdb68fbb8c7..18943a03219a9 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -47,21 +47,9 @@ tests: - class: "org.elasticsearch.xpack.test.rest.XPackRestIT" issue: "https://github.com/elastic/elasticsearch/issues/109687" method: "test {p0=sql/translate/Translate SQL}" -- class: org.elasticsearch.action.search.SearchProgressActionListenerIT - method: testSearchProgressWithHits - issue: https://github.com/elastic/elasticsearch/issues/109830 -- class: "org.elasticsearch.xpack.security.ScrollHelperIntegTests" - issue: "https://github.com/elastic/elasticsearch/issues/109905" - method: "testFetchAllEntities" -- class: "org.elasticsearch.xpack.esql.action.AsyncEsqlQueryActionIT" - issue: "https://github.com/elastic/elasticsearch/issues/109944" - method: "testBasicAsyncExecution" - class: "org.elasticsearch.action.admin.indices.rollover.RolloverIT" issue: "https://github.com/elastic/elasticsearch/issues/110034" method: "testRolloverWithClosedWriteIndex" -- class: org.elasticsearch.xpack.transform.transforms.TransformIndexerTests - method: testMaxPageSearchSizeIsResetToConfiguredValue - issue: https://github.com/elastic/elasticsearch/issues/109844 - class: org.elasticsearch.index.store.FsDirectoryFactoryTests method: testStoreDirectory issue: https://github.com/elastic/elasticsearch/issues/110210 @@ -70,18 +58,12 @@ tests: issue: 
https://github.com/elastic/elasticsearch/issues/110211 - class: "org.elasticsearch.rest.RestControllerIT" issue: "https://github.com/elastic/elasticsearch/issues/110225" -- class: "org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests" - issue: "https://github.com/elastic/elasticsearch/issues/110227" - method: "testGetPrivilegesUsesCache" - class: org.elasticsearch.upgrades.SecurityIndexRolesMetadataMigrationIT method: testMetadataMigratedAfterUpgrade issue: https://github.com/elastic/elasticsearch/issues/110232 - class: org.elasticsearch.compute.lucene.ValueSourceReaderTypeConversionTests method: testLoadAll issue: https://github.com/elastic/elasticsearch/issues/110244 -- class: org.elasticsearch.action.search.SearchProgressActionListenerIT - method: testSearchProgressWithQuery - issue: https://github.com/elastic/elasticsearch/issues/109867 - class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT method: testMinVersionAsNewVersion issue: https://github.com/elastic/elasticsearch/issues/95384 @@ -91,9 +73,6 @@ tests: - class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT method: testMinVersionAsOldVersion issue: https://github.com/elastic/elasticsearch/issues/109454 -- class: org.elasticsearch.search.aggregations.bucket.terms.RareTermsIT - method: testSingleValuedString - issue: https://github.com/elastic/elasticsearch/issues/110388 - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index c48674831c422..b63522daa4b4c 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -58,7 +58,11 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "processTestResources" mustRunAfter("precommit") doFirst { - localCluster.get().nextNodeToNextVersion() + def cluster = localCluster.get() + cluster.nodes.forEach { node -> + node.getAllTransportPortURI() + } + cluster.nextNodeToNextVersion() } } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsUpgradeIT.java new file mode 100644 index 0000000000000..c80911fe5fbcf --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsUpgradeIT.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.DefaultLocalClusterSpecBuilder; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.Map; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class FileSettingsUpgradeIT extends ParameterizedRollingUpgradeTestCase { + + @BeforeClass + public static void checkVersion() { + assumeTrue("Only valid when upgrading from pre-file settings", getOldClusterTestVersion().before(new Version(8, 4, 0))); + } + + private static final String settingsJSON = """ + { + "metadata": { + "version": "1", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": { + "indices.recovery.max_bytes_per_sec": "50mb" + } + } + }"""; + + private static final TemporaryFolder repoDirectory = new TemporaryFolder(); + + private static final ElasticsearchCluster cluster = new DefaultLocalClusterSpecBuilder().distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(NODE_NUM) + .setting("path.repo", new Supplier<>() { + @Override + @SuppressForbidden(reason = "TemporaryFolder only has io.File methods, not nio.File") + public String get() { + return repoDirectory.getRoot().getPath(); + } + }) + .setting("xpack.security.enabled", "false") + .feature(FeatureFlag.TIME_SERIES_MODE) + .configFile("operator/settings.json", Resource.fromString(settingsJSON)) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster); + + public FileSettingsUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + + public void testFileSettingsApplied() throws IOException { + if (isUpgradedCluster()) { + // the nodes have all been upgraded. 
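The assertion below only checks that the file-based setting is visible after the upgrade. The complementary behavior, that a key reserved by operator file settings rejects API writes, is not covered by this test; a hedged sketch of what such a check could look like (the error wording and the "file_settings" handler name are assumptions, not taken from this diff):

```java
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;

// inside the same test class; assertThat/containsString/expectThrows come
// from the ESTestCase and Hamcrest imports the suite already uses
public void testReservedSettingRejectsApiWrites() {
    Request put = new Request("PUT", "/_cluster/settings");
    put.setJsonEntity("""
        { "persistent": { "indices.recovery.max_bytes_per_sec": "25mb" } }
        """);
    ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(put));
    assertThat(e.getMessage(), containsString("file_settings")); // assumed wording
}
```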
// Check they read the file settings ok + Map<String, Object> response = responseAsMap(adminClient().performRequest(new Request("GET", "/_cluster/settings"))); + assertThat(XContentMapValues.extractValue(response, "persistent", "indices", "recovery", "max_bytes_per_sec"), equalTo("50mb")); + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json b/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json index 28c341d9983cc..a96be0d63834e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json @@ -1,7 +1,7 @@ { "capabilities": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/capabilities.html", + "url": "https://github.com/elastic/elasticsearch/blob/main/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc#require-or-skip-api-capabilities", "description": "Checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported" }, "stability": "experimental", diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index 7f0c24e217d14..825bcecf33fce 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -287,6 +287,9 @@ setup: - requires: cluster_features: "gte_v8.4.0" reason: 'kNN added to search endpoint in 8.4' + - skip: + cluster_features: "gte_v8.16.0" + reason: 'non-existent field handling improved in 8.16' - do: catch: bad_request search: @@ -298,9 +301,28 @@ setup: query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 + - match: { error.root_cause.0.type: "query_shard_exception" } - match: { error.root_cause.0.reason: "failed to create query: field [nonexistent] does not exist in the mapping" } +--- +"Test nonexistent field is match none": + - requires: + cluster_features: "gte_v8.16.0" + reason: 'non-existent field handling improved in 8.16' + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nonexistent + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + k: 2 + num_candidates: 3 + + - length: {hits.hits: 0} + --- "KNN Vector similarity search only": - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml index 983ac2719e71b..806e5ff73b355 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml @@ -148,6 +148,9 @@ setup: --- "Test nonexistent field": + - skip: + cluster_features: 'gte_v8.16.0' + reason: 'non-existent field handling improved in 8.16' - do: catch: bad_request search: @@ -159,8 +162,26 @@ setup: query_vector: [ 1, 0, 0, 0, -1 ] k: 2 num_candidates: 3 + - match: { error.root_cause.0.type: "query_shard_exception" } - match: { error.root_cause.0.reason: "failed to create query: field [nonexistent] does not exist in the mapping" } +--- +"Test nonexistent field is match none": + - requires: + cluster_features: 'gte_v8.16.0' + reason: 'non-existent field handling improved in 8.16' + - do:
search: + index: test + body: + fields: [ "name" ] + knn: + field: nonexistent + query_vector: [ 1, 0, 0, 0, -1 ] + k: 2 + num_candidates: 3 + + - length: {hits.hits: 0} --- "Vector similarity search only": diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index c2d1cdae85cd9..0368dec76df0e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -49,6 +49,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -121,6 +122,8 @@ public class TransportSearchAction extends HandledTransportAction SHARD_COUNT_LIMIT_SETTING = Setting.longSetting( "action.search.shard_count.limit", diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index a60262ff4a097..84811362c08e6 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -584,7 +584,7 @@ public BootstrapCheckResult check(BootstrapContext context) { // visible for testing boolean isSystemCallFilterInstalled() { - return Natives.isSystemCallFilterInstalled(); + return NativeAccess.instance().getExecSandboxState() != NativeAccess.ExecSandboxState.NONE; } @Override @@ -608,7 +608,7 @@ public BootstrapCheckResult check(BootstrapContext context) { // visible for testing boolean isSystemCallFilterInstalled() { - return Natives.isSystemCallFilterInstalled(); + return NativeAccess.instance().getExecSandboxState() != NativeAccess.ExecSandboxState.NONE; } // visible for testing diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java index f8ad9dd59650c..005375bf38540 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java @@ -27,16 +27,6 @@ public final class BootstrapInfo { /** no instantiation */ private BootstrapInfo() {} - /** - * Returns true if we successfully loaded native libraries. - *

- * If this returns false, then native operations such as locking - * memory did not work. - */ - public static boolean isNativesAvailable() { - return Natives.JNA_AVAILABLE; - } - /** * Returns true if we were able to lock the process's address space. */ @@ -44,13 +34,6 @@ public static boolean isMemoryLocked() { return NativeAccess.instance().isMemoryLocked(); } - /** - * Returns true if system call filter is installed (supported systems only) - */ - public static boolean isSystemCallFilterInstalled() { - return Natives.isSystemCallFilterInstalled(); - } - /** * Returns information about the console (tty) attached to the server process, or {@code null} * if no console is attached. diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 082e1dd9257e0..3fc659cb8065d 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -293,7 +293,7 @@ static void initializeNatives(final Path tmpFile, final boolean mlockAll, final * * TODO: should we fail hard here if system call filters fail to install, or remain lenient in non-production environments? */ - Natives.tryInstallSystemCallFilter(tmpFile); + nativeAccess.tryInstallExecSandbox(); } // mlockall if requested @@ -316,13 +316,6 @@ static void initializeNatives(final Path tmpFile, final boolean mlockAll, final } } - // force remainder of JNA to be loaded (if available). - try { - JNAKernel32Library.getInstance(); - } catch (Exception ignored) { - // we've already logged this. - } - // init lucene random seed. it will use /dev/urandom where available: StringHelper.randomId(); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java deleted file mode 100644 index 01d9a122138f1..0000000000000 --- a/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.bootstrap; - -import com.sun.jna.IntegerType; -import com.sun.jna.Native; -import com.sun.jna.NativeLong; -import com.sun.jna.Pointer; -import com.sun.jna.Structure; -import com.sun.jna.WString; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; - -import java.util.Arrays; -import java.util.List; - -/** - * Library for Windows/Kernel32 - */ -final class JNAKernel32Library { - - private static final Logger logger = LogManager.getLogger(JNAKernel32Library.class); - - // Native library instance must be kept around for the same reason. - private static final class Holder { - private static final JNAKernel32Library instance = new JNAKernel32Library(); - } - - private JNAKernel32Library() { - if (Constants.WINDOWS) { - try { - Native.register("kernel32"); - logger.debug("windows/Kernel32 library loaded"); - } catch (NoClassDefFoundError e) { - logger.warn("JNA not found. 
native methods and handlers will be disabled."); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link Windows/Kernel32 library. native methods and handlers will be disabled."); - } - } - } - - static JNAKernel32Library getInstance() { - return Holder.instance; - } - - /** - * Memory protection constraints - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx - */ - public static final int PAGE_NOACCESS = 0x0001; - public static final int PAGE_GUARD = 0x0100; - public static final int MEM_COMMIT = 0x1000; - - /** - * Contains information about a range of pages in the virtual address space of a process. - * The VirtualQuery and VirtualQueryEx functions use this structure. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx - */ - public static class MemoryBasicInformation extends Structure { - public Pointer BaseAddress; - public Pointer AllocationBase; - public NativeLong AllocationProtect; - public SizeT RegionSize; - public NativeLong State; - public NativeLong Protect; - public NativeLong Type; - - @Override - protected List getFieldOrder() { - return Arrays.asList("BaseAddress", "AllocationBase", "AllocationProtect", "RegionSize", "State", "Protect", "Type"); - } - } - - public static class SizeT extends IntegerType { - - // JNA requires this no-arg constructor to be public, - // otherwise it fails to register kernel32 library - public SizeT() { - this(0); - } - - SizeT(long value) { - super(Native.SIZE_T_SIZE, value); - } - - } - - /** - * Locks the specified region of the process's virtual address space into physical - * memory, ensuring that subsequent access to the region will not incur a page fault. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366895%28v=vs.85%29.aspx - * - * @param address A pointer to the base address of the region of pages to be locked. - * @param size The size of the region to be locked, in bytes. - * @return true if the function succeeds - */ - native boolean VirtualLock(Pointer address, SizeT size); - - /** - * Retrieves information about a range of pages within the virtual address space of a specified process. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366907%28v=vs.85%29.aspx - * - * @param handle A handle to the process whose memory information is queried. - * @param address A pointer to the base address of the region of pages to be queried. - * @param memoryInfo A pointer to a structure in which information about the specified page range is returned. - * @param length The size of the buffer pointed to by the memoryInfo parameter, in bytes. - * @return the actual number of bytes returned in the information buffer. - */ - native int VirtualQueryEx(Pointer handle, Pointer address, MemoryBasicInformation memoryInfo, int length); - - /** - * Sets the minimum and maximum working set sizes for the specified process. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686234%28v=vs.85%29.aspx - * - * @param handle A handle to the process whose working set sizes is to be set. - * @param minSize The minimum working set size for the process, in bytes. - * @param maxSize The maximum working set size for the process, in bytes. - * @return true if the function succeeds. - */ - native boolean SetProcessWorkingSetSize(Pointer handle, SizeT minSize, SizeT maxSize); - - /** - * Retrieves a pseudo handle for the current process. 
- * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms683179%28v=vs.85%29.aspx - * - * @return a pseudo handle to the current process. - */ - native Pointer GetCurrentProcess(); - - /** - * Closes an open object handle. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211%28v=vs.85%29.aspx - * - * @param handle A valid handle to an open object. - * @return true if the function succeeds. - */ - native boolean CloseHandle(Pointer handle); - - /** - * Retrieves the short path form of the specified path. See - * {@code GetShortPathName}. - * - * @param lpszLongPath the path string - * @param lpszShortPath a buffer to receive the short name - * @param cchBuffer the size of the buffer - * @return the length of the string copied into {@code lpszShortPath}, otherwise zero for failure - */ - native int GetShortPathNameW(WString lpszLongPath, char[] lpszShortPath, int cchBuffer); - - /** - * Creates or opens a new job object - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx - * - * @param jobAttributes security attributes - * @param name job name - * @return job handle if the function succeeds - */ - native Pointer CreateJobObjectW(Pointer jobAttributes, String name); - - /** - * Associates a process with an existing job - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx - * - * @param job job handle - * @param process process handle - * @return true if the function succeeds - */ - native boolean AssignProcessToJobObject(Pointer job, Pointer process); - - /** - * Basic limit information for a job object - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx - */ - public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference { - public long PerProcessUserTimeLimit; - public long PerJobUserTimeLimit; - public int LimitFlags; - public SizeT MinimumWorkingSetSize; - public SizeT MaximumWorkingSetSize; - public int ActiveProcessLimit; - public Pointer Affinity; - public int PriorityClass; - public int SchedulingClass; - - @Override - protected List getFieldOrder() { - return Arrays.asList( - "PerProcessUserTimeLimit", - "PerJobUserTimeLimit", - "LimitFlags", - "MinimumWorkingSetSize", - "MaximumWorkingSetSize", - "ActiveProcessLimit", - "Affinity", - "PriorityClass", - "SchedulingClass" - ); - } - } - - /** - * Constant for JOBOBJECT_BASIC_LIMIT_INFORMATION in Query/Set InformationJobObject - */ - static final int JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS = 2; - - /** - * Constant for LimitFlags, indicating a process limit has been set - */ - static final int JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8; - - /** - * Get job limit and state information - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx - * - * @param job job handle - * @param infoClass information class constant - * @param info pointer to information structure - * @param infoLength size of information structure - * @param returnLength length of data written back to structure (or null if not wanted) - * @return true if the function succeeds - */ - native boolean QueryInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength, Pointer returnLength); - - /** - * Set job limit and state information - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx - * - * @param job job handle - * @param infoClass information class constant - * @param info pointer to 
information structure - * @param infoLength size of information structure - * @return true if the function succeeds - */ - native boolean SetInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength); -} diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java deleted file mode 100644 index ba4e90ee2c6c1..0000000000000 --- a/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.bootstrap; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.nio.file.Path; - -/** - * This class performs the actual work with JNA and library bindings to call native methods. It should only be used after - * we are sure that the JNA classes are available to the JVM - */ -class JNANatives { - - /** no instantiation */ - private JNANatives() {} - - private static final Logger logger = LogManager.getLogger(JNANatives.class); - - // Set to true, in case native system call filter install was successful - static boolean LOCAL_SYSTEM_CALL_FILTER = false; - // Set to true, in case policy can be applied to all threads of the process (even existing ones) - // otherwise they are only inherited for new threads (ES app threads) - static boolean LOCAL_SYSTEM_CALL_FILTER_ALL = false; - - static void tryInstallSystemCallFilter(Path tmpFile) { - try { - int ret = SystemCallFilter.init(tmpFile); - LOCAL_SYSTEM_CALL_FILTER = true; - if (ret == 1) { - LOCAL_SYSTEM_CALL_FILTER_ALL = true; - } - } catch (Exception e) { - // this is likely to happen unless the kernel is newish, its a best effort at the moment - // so we log stacktrace at debug for now... - if (logger.isDebugEnabled()) { - logger.debug("unable to install syscall filter", e); - } - logger.warn("unable to install syscall filter: ", e); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java deleted file mode 100644 index c792d1e0bfad0..0000000000000 --- a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.bootstrap; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.ReferenceDocs; - -import java.lang.invoke.MethodHandles; -import java.nio.file.Path; -import java.util.Locale; - -/** - * The Natives class is a wrapper class that checks if the classes necessary for calling native methods are available on - * startup. If they are not available, this class will avoid calling code that loads these classes. 
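The deleted JNA javadoc above spells out the Windows strategy: place the process in a job object whose ActiveProcessLimit is 1 so no further processes can be created. The new FFM bindings in JdkKernel32Library reproduce that flow. A sketch against the Kernel32Library interface from this diff; GetCurrentProcess() is assumed to already exist on that interface, and the two constants are copied from the deleted file.

```java
import org.elasticsearch.nativeaccess.lib.Kernel32Library;

class WindowsExecSandboxSketch {
    static final int JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS = 2;
    static final int JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8;

    static void limitNewProcesses(Kernel32Library kernel32) {
        var job = kernel32.CreateJobObjectW();
        var limits = kernel32.newJobObjectBasicLimitInformation();
        limits.setLimitFlags(JOB_OBJECT_LIMIT_ACTIVE_PROCESS);
        limits.setActiveProcessLimit(1); // current process only, no children
        if (kernel32.SetInformationJobObject(job, JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS, limits) == false) {
            throw new IllegalStateException("SetInformationJobObject failed");
        }
        if (kernel32.AssignProcessToJobObject(job, kernel32.GetCurrentProcess()) == false) {
            throw new IllegalStateException("AssignProcessToJobObject failed");
        }
    }
}
```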
- */ -final class Natives { - /** no instantiation */ - private Natives() {} - - private static final Logger logger = LogManager.getLogger(Natives.class); - - // marker to determine if the JNA class files are available to the JVM - static final boolean JNA_AVAILABLE; - - static { - boolean v = false; - try { - // load one of the main JNA classes to see if the classes are available. this does not ensure that all native - // libraries are available, only the ones necessary by JNA to function - MethodHandles.publicLookup().ensureInitialized(com.sun.jna.Native.class); - v = true; - } catch (IllegalAccessException e) { - throw new AssertionError(e); - } catch (UnsatisfiedLinkError e) { - logger.warn( - String.format( - Locale.ROOT, - "unable to load JNA native support library, native methods will be disabled. See %s", - ReferenceDocs.EXECUTABLE_JNA_TMPDIR - ), - e - ); - } - JNA_AVAILABLE = v; - } - - static void tryInstallSystemCallFilter(Path tmpFile) { - if (JNA_AVAILABLE == false) { - logger.warn("cannot install system call filter because JNA is not available"); - return; - } - JNANatives.tryInstallSystemCallFilter(tmpFile); - } - - static boolean isSystemCallFilterInstalled() { - if (JNA_AVAILABLE == false) { - return false; - } - return JNANatives.LOCAL_SYSTEM_CALL_FILTER; - } - -} diff --git a/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java b/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java deleted file mode 100644 index 0ab855d1d5f3a..0000000000000 --- a/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java +++ /dev/null @@ -1,641 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.bootstrap; - -import com.sun.jna.Library; -import com.sun.jna.Memory; -import com.sun.jna.Native; -import com.sun.jna.NativeLong; -import com.sun.jna.Pointer; -import com.sun.jna.Structure; -import com.sun.jna.ptr.PointerByReference; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; -import org.elasticsearch.core.IOUtils; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -/** - * Installs a system call filter to block process execution. - *

- * This is supported on Linux, Solaris, FreeBSD, OpenBSD, Mac OS X, and Windows. - *

- * On Linux it currently supports amd64 and i386 architectures, requires Linux kernel 3.5 or above, and requires - * {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER} compiled into the kernel. - *

- * On Linux BPF Filters are installed using either {@code seccomp(2)} (3.17+) or {@code prctl(2)} (3.5+). {@code seccomp(2)} - * is preferred, as it allows filters to be applied to any existing threads in the process, and one motivation - * here is to protect against bugs in the JVM. Otherwise, code will fall back to the {@code prctl(2)} method - * which will at least protect elasticsearch application threads. - *

- * Linux BPF filters will return {@code EACCES} (Access Denied) for the following system calls: - *

- *

- * On Solaris 10 or higher, the following privileges are dropped with {@code priv_set(3C)}: - *

- *

- * On BSD systems, process creation is restricted with {@code setrlimit(RLIMIT_NPROC)}. - *

- * On Mac OS X Leopard or above, a custom {@code sandbox(7)} ("Seatbelt") profile is installed that - * denies the following rules: - *

- *

- * On Windows, process creation is restricted with {@code SetInformationJobObject/ActiveProcessLimit}. - *

- * This is not intended as a sandbox. It is another level of security, mostly intended to annoy - * security researchers and make their lives more difficult in achieving "remote execution" exploits. - * @see - * http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt - * @see - * https://reverse.put.as/wp-content/uploads/2011/06/The-Apple-Sandbox-BHDC2011-Paper.pdf - * @see - * https://docs.oracle.com/cd/E23824_01/html/821-1456/prbac-2.html - */ -// not an example of how to write code!!! -final class SystemCallFilter { - private static final Logger logger = LogManager.getLogger(SystemCallFilter.class); - - // Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering - - /** Access to non-standard Linux libc methods */ - interface LinuxLibrary extends Library { - /** - * maps to prctl(2) - */ - int prctl(int option, NativeLong arg2, NativeLong arg3, NativeLong arg4, NativeLong arg5); - - /** - * used to call seccomp(2), its too new... - * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing - */ - NativeLong syscall(NativeLong number, Object... args); - } - - // null if unavailable or something goes wrong. - private static final LinuxLibrary linux_libc; - - static { - LinuxLibrary lib = null; - if (Constants.LINUX) { - try { - lib = Native.loadLibrary("c", LinuxLibrary.class); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link C library. native methods (seccomp) will be disabled.", e); - } - } - linux_libc = lib; - } - - /** the preferred method is seccomp(2), since we can apply to all threads of the process */ - static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17 - static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17 - - /** otherwise, we can use prctl(2), which will at least protect ES application threads */ - static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5 - static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5 - static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23 - static final int PR_SET_SECCOMP = 22; // since Linux 2.6.23 - static final long SECCOMP_MODE_FILTER = 2; // since Linux Linux 3.5 - - /** corresponds to struct sock_filter */ - static final class SockFilter { - short code; // insn - byte jt; // number of insn to jump (skip) if true - byte jf; // number of insn to jump (skip) if false - int k; // additional data - - SockFilter(short code, byte jt, byte jf, int k) { - this.code = code; - this.jt = jt; - this.jf = jf; - this.k = k; - } - } - - /** corresponds to struct sock_fprog */ - public static final class SockFProg extends Structure implements Structure.ByReference { - public short len; // number of filters - public Pointer filter; // filters - - SockFProg(SockFilter filters[]) { - len = (short) filters.length; - // serialize struct sock_filter * explicitly, its less confusing than the JNA magic we would need - Memory filter = new Memory(len * 8); - ByteBuffer bbuf = filter.getByteBuffer(0, len * 8); - bbuf.order(ByteOrder.nativeOrder()); // little endian - for (SockFilter f : filters) { - bbuf.putShort(f.code); - bbuf.put(f.jt); - bbuf.put(f.jf); - bbuf.putInt(f.k); - } - this.filter = filter; - } - - @Override - protected List getFieldOrder() { - return Arrays.asList("len", "filter"); - } - } - - // BPF "macros" and constants - static final int BPF_LD = 0x00; - static final int BPF_W = 0x00; - static final int BPF_ABS = 0x20; - static final int BPF_JMP = 0x05; - static final int BPF_JEQ = 0x10; - static final int BPF_JGE = 0x30; - 
static final int BPF_JGT = 0x20; - static final int BPF_RET = 0x06; - static final int BPF_K = 0x00; - - static SockFilter BPF_STMT(int code, int k) { - return new SockFilter((short) code, (byte) 0, (byte) 0, k); - } - - static SockFilter BPF_JUMP(int code, int k, int jt, int jf) { - return new SockFilter((short) code, (byte) jt, (byte) jf, k); - } - - static final int SECCOMP_RET_ERRNO = 0x00050000; - static final int SECCOMP_RET_DATA = 0x0000FFFF; - static final int SECCOMP_RET_ALLOW = 0x7FFF0000; - - // some errno constants for error checking/handling - static final int EACCES = 0x0D; - static final int EFAULT = 0x0E; - static final int EINVAL = 0x16; - static final int ENOSYS = 0x26; - - // offsets that our BPF checks - // check with offsetof() when adding a new arch, move to Arch if different. - static final int SECCOMP_DATA_NR_OFFSET = 0x00; - static final int SECCOMP_DATA_ARCH_OFFSET = 0x04; - - record Arch( - int audit, // AUDIT_ARCH_XXX constant from linux/audit.h - int limit, // syscall limit (necessary for blacklisting on amd64, to ban 32-bit syscalls) - int fork, // __NR_fork - int vfork, // __NR_vfork - int execve, // __NR_execve - int execveat, // __NR_execveat - int seccomp // __NR_seccomp - ) {} - - /** supported architectures map keyed by os.arch */ - private static final Map ARCHITECTURES; - static { - ARCHITECTURES = Map.of( - "amd64", - new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317), - "aarch64", - new Arch(0xC00000B7, 0xFFFFFFFF, 1079, 1071, 221, 281, 277) - ); - } - - /** invokes prctl() from linux libc library */ - private static int linux_prctl(int option, long arg2, long arg3, long arg4, long arg5) { - return linux_libc.prctl(option, new NativeLong(arg2), new NativeLong(arg3), new NativeLong(arg4), new NativeLong(arg5)); - } - - /** invokes syscall() from linux libc library */ - private static long linux_syscall(long number, Object... args) { - return linux_libc.syscall(new NativeLong(number), args).longValue(); - } - - /** try to install our BPF filters via seccomp() or prctl() to block execution */ - private static int linuxImpl() { - // first be defensive: we can give nice errors this way, at the very least. - // also, some of these security features get backported to old versions, checking kernel version here is a big no-no! - final Arch arch = ARCHITECTURES.get(Constants.OS_ARCH); - boolean supported = Constants.LINUX && arch != null; - if (supported == false) { - throw new UnsupportedOperationException("seccomp unavailable: '" + Constants.OS_ARCH + "' architecture unsupported"); - } - - // we couldn't link methods, could be some really ancient kernel (e.g. < 2.1.57) or some bug - if (linux_libc == null) { - throw new UnsupportedOperationException( - "seccomp unavailable: could not link methods. requires kernel 3.5+ " - + "with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" - ); - } - - // try to check system calls really are who they claim - // you never know (e.g. 
https://chromium.googlesource.com/chromium/src.git/+/master/sandbox/linux/seccomp-bpf/sandbox_bpf.cc#57) - final int bogusArg = 0xf7a46a5c; - - // test seccomp(BOGUS) - long ret = linux_syscall(arch.seccomp, bogusArg); - if (ret != -1) { - throw new UnsupportedOperationException("seccomp unavailable: seccomp(BOGUS_OPERATION) returned " + ret); - } else { - int errno = Native.getLastError(); - switch (errno) { - case ENOSYS: - break; // ok - case EINVAL: - break; // ok - default: - throw new UnsupportedOperationException("seccomp(BOGUS_OPERATION): " + JNACLibrary.strerror(errno)); - } - } - - // test seccomp(VALID, BOGUS) - ret = linux_syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, bogusArg); - if (ret != -1) { - throw new UnsupportedOperationException("seccomp unavailable: seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG) returned " + ret); - } else { - int errno = Native.getLastError(); - switch (errno) { - case ENOSYS: - break; // ok - case EINVAL: - break; // ok - default: - throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + JNACLibrary.strerror(errno)); - } - } - - // test prctl(BOGUS) - ret = linux_prctl(bogusArg, 0, 0, 0, 0); - if (ret != -1) { - throw new UnsupportedOperationException("seccomp unavailable: prctl(BOGUS_OPTION) returned " + ret); - } else { - int errno = Native.getLastError(); - switch (errno) { - case ENOSYS: - break; // ok - case EINVAL: - break; // ok - default: - throw new UnsupportedOperationException("prctl(BOGUS_OPTION): " + JNACLibrary.strerror(errno)); - } - } - - // now just normal defensive checks - - // check for GET_NO_NEW_PRIVS - switch (linux_prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0)) { - case 0: - break; // not yet set - case 1: - break; // already set by caller - default: - int errno = Native.getLastError(); - if (errno == EINVAL) { - // friendly error, this will be the typical case for an old kernel - throw new UnsupportedOperationException( - "seccomp unavailable: requires kernel 3.5+ with" + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" - ); - } else { - throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno)); - } - } - // check for SECCOMP - switch (linux_prctl(PR_GET_SECCOMP, 0, 0, 0, 0)) { - case 0: - break; // not yet set - case 2: - break; // already in filter mode by caller - default: - int errno = Native.getLastError(); - if (errno == EINVAL) { - throw new UnsupportedOperationException( - "seccomp unavailable: CONFIG_SECCOMP not compiled into kernel," - + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed" - ); - } else { - throw new UnsupportedOperationException("prctl(PR_GET_SECCOMP): " + JNACLibrary.strerror(errno)); - } - } - // check for SECCOMP_MODE_FILTER - if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 0, 0, 0) != 0) { - int errno = Native.getLastError(); - switch (errno) { - case EFAULT: - break; // available - case EINVAL: - throw new UnsupportedOperationException( - "seccomp unavailable: CONFIG_SECCOMP_FILTER not" - + " compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed" - ); - default: - throw new UnsupportedOperationException("prctl(PR_SET_SECCOMP): " + JNACLibrary.strerror(errno)); - } - } - - // ok, now set PR_SET_NO_NEW_PRIVS, needed to be able to set a seccomp filter as ordinary user - if (linux_prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) { - throw new UnsupportedOperationException("prctl(PR_SET_NO_NEW_PRIVS): " + JNACLibrary.strerror(Native.getLastError())); - } - - // check it worked - if 
(linux_prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) { - throw new UnsupportedOperationException( - "seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(Native.getLastError()) - ); - } - - // BPF installed to check arch, limit, then syscall. - // See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details. - SockFilter insns[] = { - /* 1 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), // - /* 2 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.audit, 0, 7), // if (arch != audit) goto fail; - /* 3 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_NR_OFFSET), // - /* 4 */ BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, arch.limit, 5, 0), // if (syscall > LIMIT) goto fail; - /* 5 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.fork, 4, 0), // if (syscall == FORK) goto fail; - /* 6 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.vfork, 3, 0), // if (syscall == VFORK) goto fail; - /* 7 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execve, 2, 0), // if (syscall == EXECVE) goto fail; - /* 8 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execveat, 1, 0), // if (syscall == EXECVEAT) goto fail; - /* 9 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW), // pass: return OK; - /* 10 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EACCES & SECCOMP_RET_DATA)), // fail: return EACCES; - }; - // seccomp takes a long, so we pass it one explicitly to keep the JNA simple - SockFProg prog = new SockFProg(insns); - prog.write(); - long pointer = Pointer.nativeValue(prog.getPointer()); - - int method = 1; - // install filter, if this works, after this there is no going back! - // first try it with seccomp(SECCOMP_SET_MODE_FILTER), falling back to prctl() - if (linux_syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, new NativeLong(pointer)) != 0) { - method = 0; - int errno1 = Native.getLastError(); - if (logger.isDebugEnabled()) { - logger.debug( - "seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", - JNACLibrary.strerror(errno1) - ); - } - if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) { - int errno2 = Native.getLastError(); - throw new UnsupportedOperationException( - "seccomp(SECCOMP_SET_MODE_FILTER): " - + JNACLibrary.strerror(errno1) - + ", prctl(PR_SET_SECCOMP): " - + JNACLibrary.strerror(errno2) - ); - } - } - - // now check that the filter was really installed, we should be in filter mode. - if (linux_prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) { - throw new UnsupportedOperationException( - "seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " - + JNACLibrary.strerror(Native.getLastError()) - ); - } - - logger.debug("Linux seccomp filter installation successful, threads: [{}]", method == 1 ? "all" : "app"); - return method; - } - - // OS X implementation via sandbox(7) - - /** Access to non-standard OS X libc methods */ - interface MacLibrary extends Library { - /** - * maps to sandbox_init(3), since Leopard - */ - int sandbox_init(String profile, long flags, PointerByReference errorbuf); - - /** - * releases memory when an error occurs during initialization (e.g. syntax bug) - */ - void sandbox_free_error(Pointer errorbuf); - } - - // null if unavailable, or something goes wrong. - private static final MacLibrary libc_mac; - - static { - MacLibrary lib = null; - if (Constants.MAC_OS_X) { - try { - lib = Native.loadLibrary("c", MacLibrary.class); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link C library. 
native methods (seatbelt) will be disabled.", e); - } - } - libc_mac = lib; - } - - /** The only supported flag... */ - static final int SANDBOX_NAMED = 1; - /** Allow everything except process fork and execution */ - static final String SANDBOX_RULES = "(version 1) (allow default) (deny process-fork) (deny process-exec)"; - - /** try to install our custom rule profile into sandbox_init() to block execution */ - private static void macImpl(Path tmpFile) throws IOException { - // first be defensive: we can give nice errors this way, at the very least. - boolean supported = Constants.MAC_OS_X; - if (supported == false) { - throw new IllegalStateException("bug: should not be trying to initialize seatbelt for an unsupported OS"); - } - - // we couldn't link methods, could be some really ancient OS X (< Leopard) or some bug - if (libc_mac == null) { - throw new UnsupportedOperationException("seatbelt unavailable: could not link methods. requires Leopard or above."); - } - - // write rules to a temporary file, which will be passed to sandbox_init() - Path rules = Files.createTempFile(tmpFile, "es", "sb"); - Files.write(rules, Collections.singleton(SANDBOX_RULES)); - - boolean success = false; - try { - PointerByReference errorRef = new PointerByReference(); - int ret = libc_mac.sandbox_init(rules.toAbsolutePath().toString(), SANDBOX_NAMED, errorRef); - // if sandbox_init() fails, add the message from the OS (e.g. syntax error) and free the buffer - if (ret != 0) { - Pointer errorBuf = errorRef.getValue(); - RuntimeException e = new UnsupportedOperationException("sandbox_init(): " + errorBuf.getString(0)); - libc_mac.sandbox_free_error(errorBuf); - throw e; - } - logger.debug("OS X seatbelt initialization successful"); - success = true; - } finally { - if (success) { - Files.delete(rules); - } else { - IOUtils.deleteFilesIgnoringExceptions(rules); - } - } - } - - // Solaris implementation via priv_set(3C) - - /** Access to non-standard Solaris libc methods */ - interface SolarisLibrary extends Library { - /** - * see priv_set(3C), a convenience method for setppriv(2). - */ - int priv_set(int op, String which, String... privs); - } - - // null if unavailable, or something goes wrong. - private static final SolarisLibrary libc_solaris; - - static { - SolarisLibrary lib = null; - if (Constants.SUN_OS) { - try { - lib = Native.loadLibrary("c", SolarisLibrary.class); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link C library. native methods (priv_set) will be disabled.", e); - } - } - libc_solaris = lib; - } - - // constants for priv_set(2) - static final int PRIV_OFF = 1; - static final String PRIV_ALLSETS = null; - // see privileges(5) for complete list of these - static final String PRIV_PROC_FORK = "proc_fork"; - static final String PRIV_PROC_EXEC = "proc_exec"; - - static void solarisImpl() { - // first be defensive: we can give nice errors this way, at the very least. - boolean supported = Constants.SUN_OS; - if (supported == false) { - throw new IllegalStateException("bug: should not be trying to initialize priv_set for an unsupported OS"); - } - - // we couldn't link methods, could be some really ancient Solaris or some bug - if (libc_solaris == null) { - throw new UnsupportedOperationException("priv_set unavailable: could not link methods. 
requires Solaris 10+"); - } - - // drop a null-terminated list of privileges - if (libc_solaris.priv_set(PRIV_OFF, PRIV_ALLSETS, PRIV_PROC_FORK, PRIV_PROC_EXEC, null) != 0) { - throw new UnsupportedOperationException("priv_set unavailable: priv_set(): " + JNACLibrary.strerror(Native.getLastError())); - } - - logger.debug("Solaris priv_set initialization successful"); - } - - // BSD implementation via setrlimit(2) - - // TODO: add OpenBSD to Lucene Constants - // TODO: JNA doesn't have netbsd support, but this mechanism should work there too. - static final boolean OPENBSD = Constants.OS_NAME.startsWith("OpenBSD"); - - // not a standard limit, means something different on linux, etc! - static final int RLIMIT_NPROC = 7; - - static void bsdImpl() { - boolean supported = Constants.FREE_BSD || OPENBSD || Constants.MAC_OS_X; - if (supported == false) { - throw new IllegalStateException("bug: should not be trying to initialize RLIMIT_NPROC for an unsupported OS"); - } - - JNACLibrary.Rlimit limit = new JNACLibrary.Rlimit(); - limit.rlim_cur.setValue(0); - limit.rlim_max.setValue(0); - if (JNACLibrary.setrlimit(RLIMIT_NPROC, limit) != 0) { - throw new UnsupportedOperationException("RLIMIT_NPROC unavailable: " + JNACLibrary.strerror(Native.getLastError())); - } - - logger.debug("BSD RLIMIT_NPROC initialization successful"); - } - - // windows impl via job ActiveProcessLimit - - static void windowsImpl() { - if (Constants.WINDOWS == false) { - throw new IllegalStateException("bug: should not be trying to initialize ActiveProcessLimit for an unsupported OS"); - } - - JNAKernel32Library lib = JNAKernel32Library.getInstance(); - - // create a new Job - Pointer job = lib.CreateJobObjectW(null, null); - if (job == null) { - throw new UnsupportedOperationException("CreateJobObject: " + Native.getLastError()); - } - - try { - // retrieve the current basic limits of the job - int clazz = JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS; - JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION limits = new JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION(); - limits.write(); - if (lib.QueryInformationJobObject(job, clazz, limits.getPointer(), limits.size(), null) == false) { - throw new UnsupportedOperationException("QueryInformationJobObject: " + Native.getLastError()); - } - limits.read(); - // modify the number of active processes to be 1 (exactly the one process we will add to the job). - limits.ActiveProcessLimit = 1; - limits.LimitFlags = JNAKernel32Library.JOB_OBJECT_LIMIT_ACTIVE_PROCESS; - limits.write(); - if (lib.SetInformationJobObject(job, clazz, limits.getPointer(), limits.size()) == false) { - throw new UnsupportedOperationException("SetInformationJobObject: " + Native.getLastError()); - } - // assign ourselves to the job - if (lib.AssignProcessToJobObject(job, lib.GetCurrentProcess()) == false) { - throw new UnsupportedOperationException("AssignProcessToJobObject: " + Native.getLastError()); - } - } finally { - lib.CloseHandle(job); - } - - logger.debug("Windows ActiveProcessLimit initialization successful"); - } - - /** - * Attempt to drop the capability to execute for the process. - *

- * This is best effort and OS and architecture dependent. It may throw any Throwable. - * @return 0 if we can do this for application threads, 1 for the entire process - */ - static int init(Path tmpFile) throws Exception { - if (Constants.LINUX) { - return linuxImpl(); - } else if (Constants.MAC_OS_X) { - // try to enable both mechanisms if possible - bsdImpl(); - macImpl(tmpFile); - return 1; - } else if (Constants.SUN_OS) { - solarisImpl(); - return 1; - } else if (Constants.FREE_BSD || OPENBSD) { - bsdImpl(); - return 1; - } else if (Constants.WINDOWS) { - windowsImpl(); - return 1; - } else { - throw new UnsupportedOperationException("syscall filtering not supported for OS: '" + Constants.OS_NAME + "'"); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 1953c1680040a..770ed4d213c55 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -75,6 +75,8 @@ public enum ReferenceDocs { NETWORK_THREADING_MODEL, ALLOCATION_EXPLAIN_API, NETWORK_BINDING_AND_PUBLISHING, + SNAPSHOT_REPOSITORY_ANALYSIS, + S3_COMPATIBLE_REPOSITORIES, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/java/org/elasticsearch/common/util/BitArray.java b/server/src/main/java/org/elasticsearch/common/util/BitArray.java index 53244a0f2888a..041111840056d 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BitArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BitArray.java @@ -64,6 +64,17 @@ public void writeTo(StreamOutput out) throws IOException { bits.writeTo(out); } + /** + * Set or clear the {@code index}th bit based on the specified value. + */ + public void set(long index, boolean value) { + if (value) { + set(index); + } else { + clear(index); + } + } + /** * Set the {@code index}th bit. */ @@ -158,6 +169,68 @@ public boolean get(long index) { return (bits.get(wordNum) & bitmask) != 0; } + /** + * Set or clear slots between {@code fromIndex} inclusive to {@code toIndex} based on {@code value}. + */ + public void fill(long fromIndex, long toIndex, boolean value) { + if (fromIndex > toIndex) { + throw new IllegalArgumentException("From should be less than or equal to toIndex"); + } + long currentSize = size(); + if (value == false) { + // There's no need to grow the array just to clear bits. 
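+            // Bits past the current size are treated as clear, so clamping the range here
+            // lets a clear-only fill skip growing the backing array.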
+ toIndex = Math.min(toIndex, currentSize); + } + if (fromIndex == toIndex) { + return; // Empty range + } + + if (toIndex > currentSize) { + bits = bigArrays.grow(bits, wordNum(toIndex) + 1); + } + + int wordLength = Long.BYTES * Byte.SIZE; + long fullWord = 0xFFFFFFFFFFFFFFFFL; + + long firstWordIndex = fromIndex % wordLength; + long lastWordIndex = toIndex % wordLength; + + long firstWordNum = wordNum(fromIndex); + long lastWordNum = wordNum(toIndex - 1); + + // Mask first word + if (firstWordIndex > 0) { + long mask = fullWord << firstWordIndex; + + if (firstWordNum == lastWordNum) { + mask &= fullWord >>> (wordLength - lastWordIndex); + } + + if (value) { + bits.set(firstWordNum, bits.get(firstWordNum) | mask); + } else { + bits.set(firstWordNum, bits.get(firstWordNum) & ~mask); + } + + firstWordNum++; + } + + // Mask last word + if (firstWordNum <= lastWordNum) { + long mask = fullWord >>> (wordLength - lastWordIndex); + + if (value) { + bits.set(lastWordNum, bits.get(lastWordNum) | mask); + } else { + bits.set(lastWordNum, bits.get(lastWordNum) & ~mask); + } + } + + if (firstWordNum < lastWordNum) { + bits.fill(firstWordNum, lastWordNum, value ? fullWord : 0L); + } + } + public long size() { return bits.size() * (long) Long.BYTES * Byte.SIZE; } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 8f55bf16c1674..5b7a11969973d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -3946,4 +3946,16 @@ public boolean hasAtomicOverwrites() { public int getReadBufferSizeInBytes() { return bufferSize; } + + /** + * @return extra information to be included in the exception message emitted on failure of a repository analysis. + */ + public String getAnalysisFailureExtraDetail() { + return Strings.format( + """ + Elasticsearch observed the storage system underneath this repository behaved incorrectly which indicates it is not \ + suitable for use with Elasticsearch snapshots. See [%s] for further information.""", + ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS + ); + } } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java index 0f64859e877f4..f1b1c24c50788 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ToChildBlockJoinQuery; @@ -436,7 +437,7 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { ? 
Math.round(Math.min(NUM_CANDS_MULTIPLICATIVE_FACTOR * requestSize, NUM_CANDS_LIMIT)) : numCands; if (fieldType == null) { - throw new IllegalArgumentException("field [" + fieldName + "] does not exist in the mapping"); + return new MatchNoDocsQuery(); } if (fieldType instanceof DenseVectorFieldType == false) { diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index 303ae22f16269..febcaec1ba057 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -35,5 +35,7 @@ "EXECUTABLE_JNA_TMPDIR": "executable-jna-tmpdir.html", "NETWORK_THREADING_MODEL": "modules-network.html#modules-network-threading-model", "ALLOCATION_EXPLAIN_API": "cluster-allocation-explain.html", - "NETWORK_BINDING_AND_PUBLISHING": "modules-network.html#modules-network-binding-publishing" + "NETWORK_BINDING_AND_PUBLISHING": "modules-network.html#modules-network-binding-publishing", + "SNAPSHOT_REPOSITORY_ANALYSIS": "repo-analysis-api.html", + "S3_COMPATIBLE_REPOSITORIES": "repository-s3.html#repository-s3-compatible-services" } diff --git a/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java index f81a4bd2f4a18..e3f2522de4813 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java @@ -51,6 +51,27 @@ public void testRandom() { } } + public void testRandomSetValue() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int numBits = randomIntBetween(1000, 10000); + for (int step = 0; step < 3; step++) { + boolean[] bits = new boolean[numBits]; + List slots = new ArrayList<>(); + for (int i = 0; i < numBits; i++) { + bits[i] = randomBoolean(); + slots.add(i); + } + Collections.shuffle(slots, random()); + for (int i : slots) { + bitArray.set(i, bits[i]); + } + for (int i = 0; i < numBits; i++) { + assertEquals(bitArray.get(i), bits[i]); + } + } + } + } + public void testVeryLarge() { assumeThat(Runtime.getRuntime().maxMemory(), greaterThanOrEqualTo(ByteSizeUnit.MB.toBytes(512))); try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { @@ -183,6 +204,78 @@ public void testGetAndSet() { } } + public void testFillTrueRandom() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int from = randomIntBetween(0, 1000); + int to = randomIntBetween(from, 1000); + + bitArray.fill(0, 1000, false); + bitArray.fill(from, to, true); + + for (int i = 0; i < 1000; i++) { + if (i < from || i >= to) { + assertFalse(bitArray.get(i)); + } else { + assertTrue(bitArray.get(i)); + } + } + } + } + + public void testFillFalseRandom() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int from = randomIntBetween(0, 1000); + int to = randomIntBetween(from, 1000); + + bitArray.fill(0, 1000, true); + bitArray.fill(from, to, false); + + for (int i = 0; i < 1000; i++) { + if (i < from || i >= to) { + assertTrue(bitArray.get(i)); + } else { + assertFalse(bitArray.get(i)); + } + } + } + } + + public void testFillTrueSingleWord() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int from = 8; + int to = 56; + + bitArray.fill(0, 64, false); + bitArray.fill(from, to, true); + + for (int i = 0; i 
< 64; i++) { + if (i < from || i >= to) { + assertFalse(bitArray.get(i)); + } else { + assertTrue(bitArray.get(i)); + } + } + } + } + + public void testFillFalseSingleWord() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int from = 8; + int to = 56; + + bitArray.fill(0, 64, true); + bitArray.fill(from, to, false); + + for (int i = 0; i < 64; i++) { + if (i < from || i >= to) { + assertTrue(bitArray.get(i)); + } else { + assertFalse(bitArray.get(i)); + } + } + } + } + public void testSerialize() throws Exception { int initial = randomIntBetween(1, 100_000); BitArray bits1 = new BitArray(initial, BigArrays.NON_RECYCLING_INSTANCE); diff --git a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java index f5d9f35e34695..f0899384dbc5e 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; @@ -25,6 +26,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.TermQueryBuilder; @@ -157,8 +159,16 @@ public void testWrongDimension() { public void testNonexistentField() { SearchExecutionContext context = createSearchExecutionContext(); KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("nonexistent", new float[] { 1.0f, 1.0f, 1.0f }, 5, 10, null); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> query.doToQuery(context)); - assertThat(e.getMessage(), containsString("field [nonexistent] does not exist in the mapping")); + context.setAllowUnmappedFields(false); + QueryShardException e = expectThrows(QueryShardException.class, () -> query.doToQuery(context)); + assertThat(e.getMessage(), containsString("No field mapping can be found for the field with name [nonexistent]")); + } + + public void testNonexistentFieldReturnEmpty() throws IOException { + SearchExecutionContext context = createSearchExecutionContext(); + KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("nonexistent", new float[] { 1.0f, 1.0f, 1.0f }, 5, 10, null); + Query queryNone = query.doToQuery(context); + assertThat(queryNone, instanceOf(MatchNoDocsQuery.class)); } public void testWrongFieldType() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java index 78a4126ec09db..92d72afbf9d52 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java @@ -86,19 +86,6 @@ public boolean clusterHasFeature(String featureId) { Matcher matcher = VERSION_FEATURE_PATTERN.matcher(featureId); if (matcher.matches()) { 
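+            // Synthetic version features have the form "gte_vX.Y.Z": the check passes when
+            // the cluster version is on or after the version embedded in the feature id.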
Version extractedVersion = Version.fromString(matcher.group(1)); - if (Version.V_8_15_0.before(extractedVersion)) { - // As of version 8.14.0 REST tests have been migrated to use features only. - // For migration purposes we provide a synthetic version feature gte_vX.Y.Z for any version at or before 8.15.0 - // allowing for some transition period. - throw new IllegalArgumentException( - Strings.format( - "Synthetic version features are only available before [%s] for migration purposes! " - + "Please add a cluster feature to an appropriate FeatureSpecification; test-only historical-features " - + "can be supplied via ESRestTestCase#additionalTestOnlyHistoricalFeatures()", - Version.V_8_15_0 - ) - ); - } return version.onOrAfter(extractedVersion); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java index 49fb38b518dce..a8a33da27aebe 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java @@ -16,7 +16,12 @@ */ public enum FeatureFlag { TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null), - FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null); + FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null), + INFERENCE_ADAPTIVE_ALLOCATIONS_ENABLED( + "es.inference_adaptive_allocations_feature_flag_enabled=true", + Version.fromString("8.16.0"), + null + ); public final String systemProperty; public final Version from; diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java index f60466bcf43cc..7c6f85104b5f8 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java @@ -241,7 +241,6 @@ public void testTDigestHistogram() throws Exception { ); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/110406") public void testBoxplotHistogram() throws Exception { int compression = TestUtil.nextInt(random(), 200, 300); setupTDigestHistogram(compression); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index e635851a4c5e8..59eaf4affa9a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import 
org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; @@ -119,12 +120,14 @@ public static class Request extends MasterNodeRequest implements ToXCon ObjectParser.ValueType.VALUE ); PARSER.declareString(Request::setPriority, PRIORITY); - PARSER.declareObjectOrNull( - Request::setAdaptiveAllocationsSettings, - (p, c) -> AdaptiveAllocationsSettings.PARSER.parse(p, c).build(), - null, - ADAPTIVE_ALLOCATIONS - ); + if (AdaptiveAllocationsFeatureFlag.isEnabled()) { + PARSER.declareObjectOrNull( + Request::setAdaptiveAllocationsSettings, + (p, c) -> AdaptiveAllocationsSettings.PARSER.parse(p, c).build(), + null, + ADAPTIVE_ALLOCATIONS + ); + } } public static Request parseRequest(String modelId, String deploymentId, XContentParser parser) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java index c69a88600f915..28152bc0d5556 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -49,12 +50,14 @@ public static class Request extends AcknowledgedRequest implements ToXC static { PARSER.declareString(Request::setDeploymentId, MODEL_ID); PARSER.declareInt(Request::setNumberOfAllocations, NUMBER_OF_ALLOCATIONS); - PARSER.declareObjectOrNull( - Request::setAdaptiveAllocationsSettings, - (p, c) -> AdaptiveAllocationsSettings.PARSER.parse(p, c).build(), - AdaptiveAllocationsSettings.RESET_PLACEHOLDER, - ADAPTIVE_ALLOCATIONS - ); + if (AdaptiveAllocationsFeatureFlag.isEnabled()) { + PARSER.declareObjectOrNull( + Request::setAdaptiveAllocationsSettings, + (p, c) -> AdaptiveAllocationsSettings.PARSER.parse(p, c).build(), + AdaptiveAllocationsSettings.RESET_PLACEHOLDER, + ADAPTIVE_ALLOCATIONS + ); + } PARSER.declareString((r, val) -> r.ackTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AdaptiveAllocationsFeatureFlag.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AdaptiveAllocationsFeatureFlag.java new file mode 100644 index 0000000000000..a3b508c0534f9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AdaptiveAllocationsFeatureFlag.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.ml.inference.assignment; + +import org.elasticsearch.common.util.FeatureFlag; + +/** + * semantic_text feature flag. When the feature is complete, this flag will be removed. + */ +public class AdaptiveAllocationsFeatureFlag { + + private AdaptiveAllocationsFeatureFlag() {} + + private static final FeatureFlag FEATURE_FLAG = new FeatureFlag("inference_adaptive_allocations"); + + public static boolean isEnabled() { + return FEATURE_FLAG.isEnabled(); + } +} diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json index 4f3fac1aed5ae..9960bd2e7fdac 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json @@ -10,9 +10,6 @@ "total_fields": { "ignore_dynamic_beyond_limit": true } - }, - "query": { - "default_field": ["message"] } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json index b0db168e8189d..cb0e2cbffb50b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json @@ -9,9 +9,6 @@ "total_fields": { "ignore_dynamic_beyond_limit": true } - }, - "query": { - "default_field": ["message"] } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json index fab8ca451358f..7457dce805eca 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json @@ -1,5 +1,7 @@ { - "index_patterns": [".monitoring-beats-${xpack.stack.monitoring.template.version}-*"], + "index_patterns": [ + ".monitoring-beats-${xpack.stack.monitoring.template.version}-*" + ], "version": ${xpack.stack.monitoring.template.release.version}, "template": { "mappings": { @@ -198,6 +200,9 @@ "ratelimit": { "type": "long" }, + "timeout": { + "type": "long" + }, "toolarge": { "type": "long" }, @@ -212,16 +217,6 @@ } } }, - "request": { - "properties": { - "count": { - "type": "long" - } - } - }, - "unset": { - "type": "long" - }, "valid": { "properties": { "accepted": { @@ -239,151 +234,436 @@ } } } + }, + "unset": { + "type": "long" } } }, - "decoder": { + "agentcfg": { "properties": { - "deflate": { - "properties": { - "content-length": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "gzip": { - "properties": { - "content-length": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "missing-content-length": { + "elasticsearch": { "properties": { - "count": { - "type": "long" - } - } - }, - "reader": { - "properties": { - "count": { - "type": "long" - }, - "size": { - "type": "long" - } - } - }, - "uncompressed": { - "properties": { - "content-length": { - "type": "long" + "cache": { + "properties": { + "entries": { + "properties": { + "count": { + "type": "long" + } + } + }, + "refresh": { + "properties": { + "failures": { + "type": "long" + }, + "successes": { + "type": "long" + } + } + } + } }, - "count": { - "type": "long" + "fetch": { + "properties": { + "es": { + "type": "long" + }, + "fallback": { + "type": "long" + }, 
+ "invalid": { + "type": "long" + }, + "unavailable": { + "type": "long" + } + } } } } } }, - "processor": { + "jaeger": { "properties": { - "error": { + "grpc": { "properties": { - "decoding": { + "collect": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "frames": { - "type": "long" - }, - "spans": { - "type": "long" - }, - "stacktraces": { - "type": "long" - }, - "transformations": { - "type": "long" - }, - "validation": { + "sampling": { "properties": { - "count": { - "type": "long" + "event": { + "properties": { + "received": { + "properties": { + "count": { + "type": "long" + } + } + } + } }, - "errors": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } } - }, - "metric": { + } + } + }, + "otlp": { + "properties": { + "grpc": { "properties": { - "decoding": { + "logs": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "transformations": { - "type": "long" + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } + }, + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } }, - "validation": { + "traces": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } } }, - "sourcemap": { + "http": { "properties": { - "counter": { - "type": "long" + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + 
"type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } }, - "decoding": { + "metrics": { "properties": { - "count": { - "type": "long" + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "validation": { + "traces": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } } + } + } + }, + "processor": { + "properties": { + "error": { + "properties": { + "transformations": { + "type": "long" + } + } + }, + "metric": { + "properties": { + "transformations": { + "type": "long" + } + } }, "span": { "properties": { @@ -392,60 +672,127 @@ } } }, - "transaction": { + "stream": { "properties": { - "decoding": { + "accepted": { + "type": "long" + }, + "errors": { "properties": { - "count": { + "invalid": { "type": "long" }, - "errors": { + "toolarge": { "type": "long" } } - }, - "frames": { - "type": "long" - }, - "spans": { + } + } + }, + "transaction": { + "properties": { + "transformations": { "type": "long" - }, - "stacktraces": { + } + } + } + } + }, + "root": { + "properties": { + "request": { + "properties": { + "count": { "type": "long" - }, - "transactions": { + } + } + }, + "response": { + "properties": { + "count": { "type": "long" }, - "transformations": { - "type": "long" + "errors": { + "properties": { + "closed": { + "type": "long" + }, + "count": { + "type": "long" + }, + "decode": { + "type": "long" + }, + "forbidden": { + "type": "long" + }, + "internal": { + "type": "long" + }, + "invalidquery": { + "type": "long" + }, + "method": { + "type": "long" + }, + "notfound": { + "type": "long" + }, + "queue": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { + "type": "long" + } + } }, - "validation": { + "valid": { "properties": { + "accepted": { + "type": "long" + }, "count": { "type": "long" }, - "errors": { + "notmodified": { + "type": "long" + }, + "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" + } + } + }, + "sampling": { + "properties": { + "transactions_dropped": { + "type": "long" } } }, "server": { "properties": { - "concurrent": { - "properties": { - "wait": { - "properties": { - "ms": { - "type": "long" - } - } - } - } - }, "request": { "properties": { "count": { @@ -478,21 +825,33 @@ "internal": { "type": "long" }, + "invalidquery": { + "type": "long" + }, "method": { "type": "long" }, + "notfound": { + "type": "long" + }, "queue": { "type": "long" }, "ratelimit": { "type": "long" }, + "timeout": { + 
"type": "long" + }, "toolarge": { "type": "long" }, "unauthorized": { "type": "long" }, + "unavailable": { + "type": "long" + }, "validate": { "type": "long" } @@ -506,12 +865,18 @@ "count": { "type": "long" }, + "notmodified": { + "type": "long" + }, "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" } } } @@ -918,6 +1283,37 @@ "type": "long" } } + }, + "output": { + "properties": { + "elasticsearch": { + "properties": { + "bulk_requests": { + "properties": { + "available": { + "type": "long" + }, + "completed": { + "type": "long" + } + } + }, + "indexers": { + "properties": { + "active": { + "type": "long" + }, + "created": { + "type": "long" + }, + "destroyed": { + "type": "long" + } + } + } + } + } + } } } }, @@ -1135,6 +1531,10 @@ "type": "alias", "path": "beat.stats.apm_server.acm.response.errors.ratelimit" }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.timeout" + }, "toolarge": { "type": "alias", "path": "beat.stats.apm_server.acm.response.errors.toolarge" @@ -1153,18 +1553,6 @@ } } }, - "request": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.request.count" - } - } - }, - "unset": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.unset" - }, "valid": { "properties": { "accepted": { @@ -1179,9 +1567,485 @@ "type": "alias", "path": "beat.stats.apm_server.acm.response.valid.notmodified" }, - "ok": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.valid.ok" + "ok": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.valid.ok" + } + } + } + } + }, + "unset": { + "type": "alias", + "path": "beat.stats.apm_server.acm.unset" + } + } + }, + "agentcfg": { + "properties": { + "elasticsearch": { + "properties": { + "cache": { + "properties": { + "entries": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.cache.entries.count" + } + } + }, + "refresh": { + "properties": { + "failures": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.cache.refresh.failures" + }, + "successes": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.cache.refresh.successes" + } + } + } + } + }, + "fetch": { + "properties": { + "es": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.es" + }, + "fallback": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.fallback" + }, + "invalid": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.invalid" + }, + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.unavailable" + } + } + } + } + } + } + }, + "jaeger": { + "properties": { + "grpc": { + "properties": { + "collect": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.timeout" + }, + "unauthorized": { + 
"type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.valid.count" + } + } + } + } + } + } + }, + "sampling": { + "properties": { + "event": { + "properties": { + "received": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.event.received.count" + } + } + } + } + }, + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.response.errors.count" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.response.valid.count" + } + } + } + } + } + } + } + } + } + } + }, + "otlp": { + "properties": { + "grpc": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.valid.count" + } + } + } + } + } + } + }, + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.consumer.unsupported_dropped" + } + } + }, + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.valid.count" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.request.count" + } + } + }, + "response": { + "properties": { + "count": { + 
"type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.valid.count" + } + } + } + } + } + } + } + } + }, + "http": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.valid.count" + } + } + } + } + } + } + }, + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.consumer.unsupported_dropped" + } + } + }, + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.valid.count" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": 
"beat.stats.apm_server.otlp.http.traces.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.valid.count" + } + } + } + } } } } @@ -1189,248 +2053,180 @@ } } }, - "decoder": { + "processor": { "properties": { - "deflate": { + "error": { "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.deflate.content-length" - }, - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.deflate.count" + "path": "beat.stats.apm_server.processor.error.transformations" } } }, - "gzip": { + "metric": { "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.gzip.content-length" - }, - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.gzip.count" + "path": "beat.stats.apm_server.processor.metric.transformations" } } }, - "missing-content-length": { + "span": { "properties": { - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.missing-content-length.count" + "path": "beat.stats.apm_server.processor.span.transformations" } } }, - "reader": { + "stream": { "properties": { - "count": { + "accepted": { "type": "alias", - "path": "beat.stats.apm_server.decoder.reader.count" + "path": "beat.stats.apm_server.processor.stream.accepted" }, - "size": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.reader.size" + "errors": { + "properties": { + "invalid": { + "type": "alias", + "path": "beat.stats.apm_server.processor.stream.errors.invalid" + }, + "toolarge": { + "type": "alias", + "path": "beat.stats.apm_server.processor.stream.errors.toolarge" + } + } } } }, - "uncompressed": { + "transaction": { "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.uncompressed.content-length" - }, - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.uncompressed.count" + "path": "beat.stats.apm_server.processor.transaction.transformations" } } } } }, - "processor": { + "root": { "properties": { - "error": { + "request": { "properties": { - "decoding": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.decoding.count" - }, - "errors": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.decoding.errors" - } - } - }, - "frames": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.frames" - }, - "spans": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.spans" - }, - "stacktraces": { + "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.stacktraces" - }, - "transformations": { + "path": "beat.stats.apm_server.root.request.count" + } + } + }, + "response": { + "properties": { + "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.transformations" + "path": "beat.stats.apm_server.root.response.count" }, - "validation": { + "errors": { "properties": { + "closed": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.errors.closed" + }, "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.validation.count" + "path": "beat.stats.apm_server.root.response.errors.count" }, - "errors": { + "decode": { "type": "alias", - "path": 
"beat.stats.apm_server.processor.error.validation.errors" - } - } - } - } - }, - "metric": { - "properties": { - "decoding": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.decode" + }, + "forbidden": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.decoding.count" + "path": "beat.stats.apm_server.root.response.errors.forbidden" }, - "errors": { + "internal": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.decoding.errors" - } - } - }, - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.metric.transformations" - }, - "validation": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.internal" + }, + "invalidquery": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.validation.count" + "path": "beat.stats.apm_server.root.response.errors.invalidquery" }, - "errors": { + "method": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.validation.errors" - } - } - } - } - }, - "sourcemap": { - "properties": { - "counter": { - "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.counter" - }, - "decoding": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.method" + }, + "notfound": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.decoding.count" + "path": "beat.stats.apm_server.root.response.errors.notfound" }, - "errors": { + "queue": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.decoding.errors" - } - } - }, - "validation": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.queue" + }, + "ratelimit": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.validation.count" + "path": "beat.stats.apm_server.root.response.errors.ratelimit" }, - "errors": { + "timeout": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.validation.errors" - } - } - } - } - }, - "span": { - "properties": { - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.span.transformations" - } - } - }, - "transaction": { - "properties": { - "decoding": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.timeout" + }, + "toolarge": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.errors.toolarge" + }, + "unauthorized": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.decoding.count" + "path": "beat.stats.apm_server.root.response.errors.unauthorized" }, - "errors": { + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.errors.unavailable" + }, + "validate": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.decoding.errors" + "path": "beat.stats.apm_server.root.response.errors.validate" } } }, - "frames": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.frames" - }, - "spans": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.spans" - }, - "stacktraces": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.stacktraces" - }, - "transactions": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.transactions" - }, - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.transformations" - }, - "validation": { + "valid": { "properties": { + "accepted": { + "type": "alias", + 
"path": "beat.stats.apm_server.root.response.valid.accepted" + }, "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.validation.count" + "path": "beat.stats.apm_server.root.response.valid.count" }, - "errors": { + "notmodified": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.valid.notmodified" + }, + "ok": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.validation.errors" + "path": "beat.stats.apm_server.root.response.valid.ok" } } } } + }, + "unset": { + "type": "alias", + "path": "beat.stats.apm_server.root.unset" + } + } + }, + "sampling": { + "properties": { + "transactions_dropped": { + "type": "alias", + "path": "beat.stats.apm_server.sampling.transactions_dropped" } } }, "server": { "properties": { - "concurrent": { - "properties": { - "wait": { - "properties": { - "ms": { - "type": "alias", - "path": "beat.stats.apm_server.server.concurrent.wait.ms" - } - } - } - } - }, "request": { "properties": { "count": { @@ -1471,10 +2267,18 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.errors.internal" }, + "invalidquery": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.invalidquery" + }, "method": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.method" }, + "notfound": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.notfound" + }, "queue": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.queue" @@ -1483,6 +2287,10 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.errors.ratelimit" }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.timeout" + }, "toolarge": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.toolarge" @@ -1491,6 +2299,10 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.errors.unauthorized" }, + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.unavailable" + }, "validate": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.validate" @@ -1507,6 +2319,10 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.valid.count" }, + "notmodified": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.valid.notmodified" + }, "ok": { "type": "alias", "path": "beat.stats.apm_server.server.response.valid.ok" @@ -1514,49 +2330,10 @@ } } } - } - } - }, - "sampling": { - "properties": { - "transactions_dropped": { - "type": "long" }, - "tail": { - "properties": { - "dynamic_service_groups": { - "type": "long" - }, - "storage": { - "properties": { - "lsm_size": { - "type": "long" - }, - "value_log_size": { - "type": "long" - } - } - }, - "events": { - "properties": { - "processed": { - "type": "long" - }, - "dropped": { - "type": "long" - }, - "stored": { - "type": "long" - }, - "sampled": { - "type": "long" - }, - "head_unsampled": { - "type": "long" - } - } - } - } + "unset": { + "type": "alias", + "path": "beat.stats.apm_server.server.unset" } } } @@ -1985,6 +2762,42 @@ } } } + }, + "output": { + "properties": { + "elasticsearch": { + "properties": { + "bulk_requests": { + "properties": { + "available": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.bulk_requests.available" + }, + "completed": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.bulk_requests.completed" + } + } + }, + "indexers": { + "properties": { + "active": { + "type": "alias", + "path": 
"beat.stats.output.elasticsearch.indexers.active" + }, + "created": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.indexers.created" + }, + "destroyed": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.indexers.destroyed" + } + } + } + } + } + } } } }, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json index 6dee05564cc10..d699317c29da3 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json @@ -346,17 +346,11 @@ "response": { "properties": { "count": { - "type": "long" + "type": "long" }, "errors": { "properties": { - "validate": { - "type": "long" - }, - "internal": { - "type": "long" - }, - "queue": { + "closed": { "type": "long" }, "count": { @@ -365,13 +359,13 @@ "decode": { "type": "long" }, - "toolarge": { + "forbidden": { "type": "long" }, - "unavailable": { + "internal": { "type": "long" }, - "forbidden": { + "invalidquery": { "type": "long" }, "method": { @@ -380,125 +374,454 @@ "notfound": { "type": "long" }, - "invalidquery": { + "queue": { "type": "long" }, "ratelimit": { "type": "long" }, - "closed": { + "timeout": { + "type": "long" + }, + "toolarge": { "type": "long" }, "unauthorized": { "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { + "type": "long" } } }, "valid": { "properties": { - "notmodified": { + "accepted": { "type": "long" }, "count": { "type": "long" }, - "ok": { + "notmodified": { "type": "long" }, - "accepted": { - "type": "long" - } - } - }, - "unset": { - "type": "long" - }, - "request": { - "properties": { - "count": { + "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" } } }, - "server": { + "agentcfg": { "properties": { - "request": { + "elasticsearch": { "properties": { - "count": { - "type": "long" - } - } - }, - "concurrent": { - "properties": { - "wait": { + "cache": { "properties": { - "ms": { - "type": "long" + "entries": { + "properties": { + "count": { + "type": "long" + } + } + }, + "refresh": { + "properties": { + "failures": { + "type": "long" + }, + "successes": { + "type": "long" + } + } } } - } - } - }, - "response": { - "properties": { - "count": { - "type": "long" }, - "errors": { + "fetch": { "properties": { - "count": { + "es": { "type": "long" }, - "toolarge": { + "fallback": { "type": "long" }, - "validate": { + "invalid": { "type": "long" }, - "ratelimit": { + "unavailable": { "type": "long" + } + } + } + } + } + } + }, + "jaeger": { + "properties": { + "grpc": { + "properties": { + "collect": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "queue": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "sampling": { + "properties": { + "event": { + "properties": { + "received": { + "properties": { + "count": { + "type": "long" + } + } + } + } }, - "closed": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "forbidden": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": 
{ + "properties": { + "count": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + } + } + } + } + }, + "otlp": { + "properties": { + "grpc": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "concurrency": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } }, - "unauthorized": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "internal": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "decode": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "http": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "method": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "valid": { + "metrics": { "properties": { - "ok": { - "type": "long" + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } }, - "accepted": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "count": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } @@ -506,195 +829,138 @@ } } }, - "decoder": { + "processor": { "properties": { - "deflate": { + "error": 
{ "properties": { - "content-length": { - "type": "long" - }, - "count": { + "transformations": { "type": "long" } } }, - "gzip": { + "metric": { "properties": { - "content-length": { - "type": "long" - }, - "count": { + "transformations": { "type": "long" } } }, - "uncompressed": { + "span": { "properties": { - "content-length": { - "type": "long" - }, - "count": { + "transformations": { "type": "long" } } }, - "reader": { + "stream": { "properties": { - "size": { + "accepted": { "type": "long" }, - "count": { - "type": "long" + "errors": { + "properties": { + "invalid": { + "type": "long" + }, + "toolarge": { + "type": "long" + } + } } } }, - "missing-content-length": { + "transaction": { "properties": { - "count": { + "transformations": { "type": "long" } } } } - }, - "processor": { + "root": { "properties": { - "metric": { + "request": { "properties": { - "decoding": { - "properties": { - "errors": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "validation": { - "properties": { - "errors": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "transformations": { + "count": { "type": "long" } } }, - "sourcemap": { + "response": { "properties": { - "counter": { + "count": { "type": "long" }, - "decoding": { + "errors": { "properties": { - "errors": { + "closed": { "type": "long" }, "count": { "type": "long" - } - } - }, - "validation": { - "properties": { - "errors": { + }, + "decode": { "type": "long" }, - "count": { + "forbidden": { "type": "long" - } - } - } - } - }, - "transaction": { - "properties": { - "decoding": { - "properties": { - "errors": { + }, + "internal": { "type": "long" }, - "count": { + "invalidquery": { "type": "long" - } - } - }, - "validation": { - "properties": { - "errors": { + }, + "method": { "type": "long" }, - "count": { + "notfound": { "type": "long" - } - } - }, - "transformations": { - "type": "long" - }, - "transactions": { - "type": "long" - }, - "spans": { - "type": "long" - }, - "stacktraces": { - "type": "long" - }, - "frames": { - "type": "long" - } - } - }, - "error": { - "properties": { - "decoding": { - "properties": { - "errors": { + }, + "queue": { "type": "long" }, - "count": { + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { "type": "long" } } }, - "validation": { + "valid": { "properties": { - "errors": { + "accepted": { "type": "long" }, "count": { "type": "long" + }, + "notmodified": { + "type": "long" + }, + "ok": { + "type": "long" } } - }, - "transformations": { - "type": "long" - }, - "errors": { - "type": "long" - }, - "stacktraces": { - "type": "long" - }, - "frames": { - "type": "long" } } }, - "span": { - "properties": { - "transformations": { - "type": "long" - } - } + "unset": { + "type": "long" } } }, @@ -702,42 +968,95 @@ "properties": { "transactions_dropped": { "type": "long" + } + } + }, + "server": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "tail": { + "response": { "properties": { - "dynamic_service_groups": { + "count": { "type": "long" }, - "storage": { + "errors": { "properties": { - "lsm_size": { + "closed": { "type": "long" }, - "value_log_size": { + "concurrency": { + "type": "long" + }, + "count": { + "type": "long" + }, + "decode": { + "type": "long" + }, + "forbidden": { + "type": "long" + }, + "internal": { + "type": "long" + }, + "invalidquery": { + "type": 
"long" + }, + "method": { + "type": "long" + }, + "notfound": { + "type": "long" + }, + "queue": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { "type": "long" } } }, - "events": { + "valid": { "properties": { - "processed": { - "type": "long" - }, - "dropped": { + "accepted": { "type": "long" }, - "stored": { + "count": { "type": "long" }, - "sampled": { + "notmodified": { "type": "long" }, - "head_unsampled": { + "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" } } } @@ -893,6 +1212,37 @@ } } } + }, + "output": { + "properties": { + "elasticsearch": { + "properties": { + "bulk_requests": { + "properties": { + "available": { + "type": "long" + }, + "completed": { + "type": "long" + } + } + }, + "indexers": { + "properties": { + "active": { + "type": "long" + }, + "created": { + "type": "long" + }, + "destroyed": { + "type": "long" + } + } + } + } + } + } } } }, diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml index 5cfb016e1b6df..b0f850d09f76d 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml @@ -76,6 +76,42 @@ setup: - match: { custom_scheduling: {} } - match: { filtering.0.domain: DEFAULT } + +--- +'Create Connector - Check for missing keys': + - do: + connector.put: + connector_id: test-connector + body: + index_name: search-test + name: my-connector + language: pl + is_native: false + service_type: super-connector + + - match: { result: 'created' } + + - do: + connector.get: + connector_id: test-connector + + - match: { id: test-connector } + - match: { index_name: search-test } + - match: { name: my-connector } + - match: { language: pl } + - match: { is_native: false } + - match: { service_type: super-connector } + + # check keys that are not populated upon connector creation + - is_false: api_key_id + - is_false: api_key_secret_id + - is_false: description + - is_false: error + - is_false: features + - is_false: last_seen + - is_false: sync_cursor + + --- 'Create Connector - Resource already exists': - do: diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java index a9c488b024d49..46275bb623b7a 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java @@ -377,25 +377,61 @@ public void toInnerXContent(XContentBuilder builder, Params params) throws IOExc if (connectorId != null) { builder.field(ID_FIELD.getPreferredName(), connectorId); } - builder.field(API_KEY_ID_FIELD.getPreferredName(), apiKeyId); - builder.field(API_KEY_SECRET_ID_FIELD.getPreferredName(), apiKeySecretId); - builder.xContentValuesMap(CONFIGURATION_FIELD.getPreferredName(), configuration); - builder.xContentValuesMap(CUSTOM_SCHEDULING_FIELD.getPreferredName(), customScheduling); - 
builder.field(DESCRIPTION_FIELD.getPreferredName(), description); - builder.field(ERROR_FIELD.getPreferredName(), error); - builder.field(FEATURES_FIELD.getPreferredName(), features); - builder.xContentList(FILTERING_FIELD.getPreferredName(), filtering); - builder.field(INDEX_NAME_FIELD.getPreferredName(), indexName); + if (apiKeyId != null) { + builder.field(API_KEY_ID_FIELD.getPreferredName(), apiKeyId); + } + if (apiKeySecretId != null) { + builder.field(API_KEY_SECRET_ID_FIELD.getPreferredName(), apiKeySecretId); + } + if (configuration != null) { + builder.xContentValuesMap(CONFIGURATION_FIELD.getPreferredName(), configuration); + } + if (customScheduling != null) { + builder.xContentValuesMap(CUSTOM_SCHEDULING_FIELD.getPreferredName(), customScheduling); + } + if (description != null) { + builder.field(DESCRIPTION_FIELD.getPreferredName(), description); + } + if (error != null) { + builder.field(ERROR_FIELD.getPreferredName(), error); + } + if (features != null) { + builder.field(FEATURES_FIELD.getPreferredName(), features); + } + if (filtering != null) { + builder.xContentList(FILTERING_FIELD.getPreferredName(), filtering); + } + if (indexName != null) { + builder.field(INDEX_NAME_FIELD.getPreferredName(), indexName); + } builder.field(IS_NATIVE_FIELD.getPreferredName(), isNative); - builder.field(LANGUAGE_FIELD.getPreferredName(), language); - builder.field(LAST_SEEN_FIELD.getPreferredName(), lastSeen); - syncInfo.toXContent(builder, params); - builder.field(NAME_FIELD.getPreferredName(), name); - builder.field(PIPELINE_FIELD.getPreferredName(), pipeline); - builder.field(SCHEDULING_FIELD.getPreferredName(), scheduling); - builder.field(SERVICE_TYPE_FIELD.getPreferredName(), serviceType); - builder.field(SYNC_CURSOR_FIELD.getPreferredName(), syncCursor); - builder.field(STATUS_FIELD.getPreferredName(), status.toString()); + if (language != null) { + builder.field(LANGUAGE_FIELD.getPreferredName(), language); + } + if (lastSeen != null) { + builder.field(LAST_SEEN_FIELD.getPreferredName(), lastSeen); + } + if (syncInfo != null) { + syncInfo.toXContent(builder, params); + } + if (name != null) { + builder.field(NAME_FIELD.getPreferredName(), name); + } + if (pipeline != null) { + builder.field(PIPELINE_FIELD.getPreferredName(), pipeline); + } + if (scheduling != null) { + builder.field(SCHEDULING_FIELD.getPreferredName(), scheduling); + } + if (serviceType != null) { + builder.field(SERVICE_TYPE_FIELD.getPreferredName(), serviceType); + } + if (syncCursor != null) { + builder.field(SYNC_CURSOR_FIELD.getPreferredName(), syncCursor); + } + if (status != null) { + builder.field(STATUS_FIELD.getPreferredName(), status.toString()); + } builder.field(SYNC_NOW_FIELD.getPreferredName(), syncNow); } diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 3e61b9bc5e51c..e5816d0b7c78b 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -400,6 +400,11 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java" } File stateInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st") + template { + it.properties = booleanProperties + it.inputFile = stateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/BooleanState.java" + } template { it.properties = intProperties it.inputFile = stateInputFile @@ -453,6 +458,11 @@ tasks.named('stringTemplates').configure { it.outputFile = 
"org/elasticsearch/compute/data/BooleanLookup.java" } File arrayStateInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st") + template { + it.properties = booleanProperties + it.inputFile = arrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/BooleanArrayState.java" + } template { it.properties = intProperties it.inputFile = arrayStateInputFile diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index 1127d4b4ccb72..b3d32a82cc7a9 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -445,6 +445,8 @@ String intermediateStateRowAccess() { private String primitiveStateMethod() { switch (stateType.toString()) { + case "org.elasticsearch.compute.aggregation.BooleanState": + return "booleanValue"; case "org.elasticsearch.compute.aggregation.IntState": return "intValue"; case "org.elasticsearch.compute.aggregation.LongState": @@ -494,6 +496,9 @@ private MethodSpec evaluateFinal() { private void primitiveStateToResult(MethodSpec.Builder builder) { switch (stateType.toString()) { + case "org.elasticsearch.compute.aggregation.BooleanState": + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantBooleanBlockWith(state.booleanValue(), 1)"); + return; case "org.elasticsearch.compute.aggregation.IntState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1)"); return; @@ -531,8 +536,9 @@ private MethodSpec close() { private boolean hasPrimitiveState() { return switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.IntState", "org.elasticsearch.compute.aggregation.LongState", - "org.elasticsearch.compute.aggregation.DoubleState", "org.elasticsearch.compute.aggregation.FloatState" -> true; + case "org.elasticsearch.compute.aggregation.BooleanState", "org.elasticsearch.compute.aggregation.IntState", + "org.elasticsearch.compute.aggregation.LongState", "org.elasticsearch.compute.aggregation.DoubleState", + "org.elasticsearch.compute.aggregation.FloatState" -> true; default -> false; }; } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index c9cdcfe42fddd..79df41f304c06 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -584,8 +584,9 @@ private MethodSpec close() { private boolean hasPrimitiveState() { return switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.IntArrayState", "org.elasticsearch.compute.aggregation.LongArrayState", - "org.elasticsearch.compute.aggregation.DoubleArrayState", "org.elasticsearch.compute.aggregation.FloatArrayState" -> true; + case "org.elasticsearch.compute.aggregation.BooleanArrayState", "org.elasticsearch.compute.aggregation.IntArrayState", + "org.elasticsearch.compute.aggregation.LongArrayState", "org.elasticsearch.compute.aggregation.DoubleArrayState", + 
"org.elasticsearch.compute.aggregation.FloatArrayState" -> true; default -> false; }; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanArrayState.java new file mode 100644 index 0000000000000..79f4a88d403c6 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanArrayState.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of booleans. It is created in a mode where it + * won't track the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
<p>
+ * This class is generated. Do not edit it. + * </p>
+ */ +final class BooleanArrayState extends AbstractArrayState implements GroupingAggregatorState { + private final boolean init; + + private BitArray values; + private int size; + + BooleanArrayState(BigArrays bigArrays, boolean init) { + super(bigArrays); + this.values = new BitArray(1, bigArrays); + this.size = 1; + this.values.set(0, init); + this.init = init; + } + + boolean get(int groupId) { + return values.get(groupId); + } + + boolean getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, boolean value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds()) { + try (var builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendBoolean(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group)) { + builder.appendBoolean(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= size) { + values.fill(size, groupId + 1, init); + size = groupId + 1; + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 2; + try ( + var valuesBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendBoolean(values.get(group)); + } else { + valuesBuilder.appendBoolean(false); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java new file mode 100644 index 0000000000000..7d225c7c06a72 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single boolean. + * This class is generated. Do not edit it. 
+ */ +final class BooleanState implements AggregatorState { + private boolean value; + private boolean seen; + + BooleanState() { + this(false); + } + + BooleanState(boolean init) { + this.value = init; + } + + boolean booleanValue() { + return value; + } + + void booleanValue(boolean value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 2; + blocks[offset + 0] = driverContext.blockFactory().newConstantBooleanBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunction.java new file mode 100644 index 0000000000000..2ffbcfc2d9458 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunction.java @@ -0,0 +1,136 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MaxBooleanAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class MaxBooleanAggregatorFunction implements AggregatorFunction { + private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.BOOLEAN), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final BooleanState state; + + private final List<Integer> channels; + + public MaxBooleanAggregatorFunction(DriverContext driverContext, List<Integer> channels, + BooleanState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MaxBooleanAggregatorFunction create(DriverContext driverContext, + List<Integer> channels) { + return new MaxBooleanAggregatorFunction(driverContext, channels, new BooleanState(MaxBooleanAggregator.init())); + } + + public static List<IntermediateStateDesc> intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + BooleanBlock block = page.getBlock(channels.get(0)); + BooleanVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(BooleanVector vector) { + state.seen(true); + for (int i = 0; i < vector.getPositionCount(); i++) { + state.booleanValue(MaxBooleanAggregator.combine(state.booleanValue(), vector.getBoolean(i))); + } + } + + private void addRawBlock(BooleanBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + state.seen(true); + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + state.booleanValue(MaxBooleanAggregator.combine(state.booleanValue(), block.getBoolean(i))); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + BooleanVector max = ((BooleanBlock) maxUncast).asVector(); + assert max.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + if (seen.getBoolean(0)) { + state.booleanValue(MaxBooleanAggregator.combine(state.booleanValue(), max.getBoolean(0))); + state.seen(true); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + if (state.seen() == false) { + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); + return; + } + blocks[offset] = driverContext.blockFactory().newConstantBooleanBlockWith(state.booleanValue(), 1); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionSupplier.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..e5bbf63ddee07 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MaxBooleanAggregator}. + * This class is generated. Do not edit it. + */ +public final class MaxBooleanAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List<Integer> channels; + + public MaxBooleanAggregatorFunctionSupplier(List<Integer> channels) { + this.channels = channels; + } + + @Override + public MaxBooleanAggregatorFunction aggregator(DriverContext driverContext) { + return MaxBooleanAggregatorFunction.create(driverContext, channels); + } + + @Override + public MaxBooleanGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MaxBooleanGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "max of booleans"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..b72ff8354cb12 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java @@ -0,0 +1,204 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MaxBooleanAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class MaxBooleanGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.BOOLEAN), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final BooleanArrayState state; + + private final List<Integer> channels; + + private final DriverContext driverContext; + + public MaxBooleanGroupingAggregatorFunction(List<Integer> channels, BooleanArrayState state, + DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MaxBooleanGroupingAggregatorFunction create(List<Integer> channels, + DriverContext driverContext) { + return new MaxBooleanGroupingAggregatorFunction(channels, new BooleanArrayState(driverContext.bigArrays(), MaxBooleanAggregator.init()), driverContext); + } + + public static List<IntermediateStateDesc> intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + BooleanBlock valuesBlock = page.getBlock(channels.get(0)); + BooleanVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(v))); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = 
Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(v))); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BooleanVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + BooleanVector max = ((BooleanBlock) maxUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert max.getPositionCount() == seen.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + MaxBooleanAggregator.combineIntermediate(state, groupId, max.getBoolean(groupPosition + positionOffset), seen.getBoolean(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + BooleanArrayState inState = ((MaxBooleanGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + if (inState.hasValue(position)) { + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), inState.get(position))); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = state.toValuesBlock(selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunction.java new file mode 100644 index 0000000000000..101a6c7f9169a --- /dev/null +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunction.java @@ -0,0 +1,136 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MinBooleanAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinBooleanAggregatorFunction implements AggregatorFunction { + private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.BOOLEAN), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final BooleanState state; + + private final List<Integer> channels; + + public MinBooleanAggregatorFunction(DriverContext driverContext, List<Integer> channels, + BooleanState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MinBooleanAggregatorFunction create(DriverContext driverContext, + List<Integer> channels) { + return new MinBooleanAggregatorFunction(driverContext, channels, new BooleanState(MinBooleanAggregator.init())); + } + + public static List<IntermediateStateDesc> intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + BooleanBlock block = page.getBlock(channels.get(0)); + BooleanVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(BooleanVector vector) { + state.seen(true); + for (int i = 0; i < vector.getPositionCount(); i++) { + state.booleanValue(MinBooleanAggregator.combine(state.booleanValue(), vector.getBoolean(i))); + } + } + + private void addRawBlock(BooleanBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + state.seen(true); + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + state.booleanValue(MinBooleanAggregator.combine(state.booleanValue(), block.getBoolean(i))); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + BooleanVector min = ((BooleanBlock) minUncast).asVector(); + assert min.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + if (seen.getBoolean(0)) { + 
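// Only fold in a partial min when its seen flag is set: an unseen partial state carries
// nothing but the init() placeholder, so merging it would corrupt the running value.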
state.booleanValue(MinBooleanAggregator.combine(state.booleanValue(), min.getBoolean(0))); + state.seen(true); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + if (state.seen() == false) { + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); + return; + } + blocks[offset] = driverContext.blockFactory().newConstantBooleanBlockWith(state.booleanValue(), 1); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..f66dc6e67e0fd --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MinBooleanAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinBooleanAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List<Integer> channels; + + public MinBooleanAggregatorFunctionSupplier(List<Integer> channels) { + this.channels = channels; + } + + @Override + public MinBooleanAggregatorFunction aggregator(DriverContext driverContext) { + return MinBooleanAggregatorFunction.create(driverContext, channels); + } + + @Override + public MinBooleanGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MinBooleanGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "min of booleans"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..6175cad3924e2 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java @@ -0,0 +1,206 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
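// Sketch (not part of this diff): the generated Min/Max boolean functions above and below
// delegate to MinBooleanAggregator / MaxBooleanAggregator, whose sources these hunks do not
// include. Assuming ESQL orders booleans as false < true, those scalar aggregators plausibly
// reduce to logical OR (max) and AND (min):
//
//     class MaxBooleanAggregator {
//         public static boolean init() { return false; }               // identity for OR
//         public static boolean combine(boolean current, boolean v) {
//             return current || v;                                     // max == OR when false < true
//         }
//     }
//
//     class MinBooleanAggregator {
//         public static boolean init() { return true; }                // identity for AND
//         public static boolean combine(boolean current, boolean v) {
//             return current && v;                                     // min == AND when false < true
//         }
//     }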
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MinBooleanAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinBooleanGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.BOOLEAN), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final BooleanArrayState state; + + private final List<Integer> channels; + + private final DriverContext driverContext; + + public MinBooleanGroupingAggregatorFunction(List<Integer> channels, BooleanArrayState state, + DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MinBooleanGroupingAggregatorFunction create(List<Integer> channels, + DriverContext driverContext) { + return new MinBooleanGroupingAggregatorFunction(channels, new BooleanArrayState(driverContext.bigArrays(), MinBooleanAggregator.init()), driverContext); + } + + public static List<IntermediateStateDesc> intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + BooleanBlock valuesBlock = page.getBlock(channels.get(0)); + BooleanVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(v))); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { + for (int
groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(v))); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BooleanVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + BooleanVector min = ((BooleanBlock) minUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert min.getPositionCount() == seen.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (seen.getBoolean(groupPosition + positionOffset)) { + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), min.getBoolean(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + BooleanArrayState inState = ((MinBooleanGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + if (inState.hasValue(position)) { + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), inState.get(position))); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + 
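+    // Builds one boolean per selected group from the BitArray-backed state; groups that never saw a value come out as null.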
blocks[offset] = state.toValuesBlock(selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregator.java new file mode 100644 index 0000000000000..79d0cd4d7492f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregator.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; + +@Aggregator({ @IntermediateState(name = "max", type = "BOOLEAN"), @IntermediateState(name = "seen", type = "BOOLEAN") }) +@GroupingAggregator +class MaxBooleanAggregator { + + public static boolean init() { + return false; + } + + public static boolean combine(boolean current, boolean v) { + return current || v; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBooleanAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBooleanAggregator.java new file mode 100644 index 0000000000000..372a5d988688f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBooleanAggregator.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; + +@Aggregator({ @IntermediateState(name = "min", type = "BOOLEAN"), @IntermediateState(name = "seen", type = "BOOLEAN") }) +@GroupingAggregator +class MinBooleanAggregator { + + public static boolean init() { + return true; + } + + public static boolean combine(boolean current, boolean v) { + return current && v; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st index 18686928f14a8..10dbd9f423725 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st @@ -8,7 +8,11 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.util.BigArrays; +$if(boolean)$ +import org.elasticsearch.common.util.BitArray; +$else$ import org.elasticsearch.common.util.$Type$Array; +$endif$ import org.elasticsearch.compute.data.Block; $if(long)$ import org.elasticsearch.compute.data.IntVector; @@ -17,7 +21,7 @@ import org.elasticsearch.compute.data.$Type$Block; $if(int)$ import org.elasticsearch.compute.data.$Type$Vector; $endif$ -$if(double||float)$ +$if(boolean||double||float)$ import org.elasticsearch.compute.data.IntVector; $endif$ import org.elasticsearch.compute.operator.DriverContext; @@ -41,11 +45,22 @@ import org.elasticsearch.core.Releasables; final class $Type$ArrayState extends AbstractArrayState implements GroupingAggregatorState { private final $type$ init; +$if(boolean)$ + private BitArray values; + private int size; + +$else$ private $Type$Array values; +$endif$ $Type$ArrayState(BigArrays bigArrays, $type$ init) { super(bigArrays); +$if(boolean)$ + this.values = new BitArray(1, bigArrays); + this.size = 1; +$else$ this.values = bigArrays.new$Type$Array(1, false); +$endif$ this.values.set(0, init); this.init = init; } @@ -95,11 +110,18 @@ $endif$ } private void ensureCapacity(int groupId) { +$if(boolean)$ + if (groupId >= size) { + values.fill(size, groupId + 1, init); + size = groupId + 1; + } +$else$ if (groupId >= values.size()) { long prevSize = values.size(); values = bigArrays.grow(values, groupId + 1); values.fill(prevSize, values.size(), init); } +$endif$ } /** Extracts an intermediate view of the contents of this state. */ @@ -120,7 +142,7 @@ $endif$ if (group < values.size()) { valuesBuilder.append$Type$(values.get(group)); } else { - valuesBuilder.append$Type$(0); // TODO can we just use null? + valuesBuilder.append$Type$($if(boolean)$false$else$0$endif$); // TODO can we just use null? 
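+          // The boolean specialization pads with the literal false: the BitArray backing store has no numeric zero to fall back on.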
} hasValueBuilder.appendBoolean(i, hasValue(group)); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st index 427d1a0c312cc..2d2d706c9454f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st @@ -19,7 +19,11 @@ final class $Type$State implements AggregatorState { private boolean seen; $Type$State() { +$if(boolean)$ + this(false); +$else$ this(0); +$endif$ } $Type$State($type$ init) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index f647f4fba0225..a365a655370a2 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -250,21 +250,20 @@ public boolean isForceExecution() { protected void doRun() { assert Transports.assertNotTransportThread("reaping inactive exchanges can be expensive"); assert ThreadPool.assertNotScheduleThread("reaping inactive exchanges can be expensive"); + logger.debug("start removing inactive sinks"); final long nowInMillis = threadPool.relativeTimeInMillis(); for (Map.Entry e : sinks.entrySet()) { ExchangeSinkHandler sink = e.getValue(); if (sink.hasData() && sink.hasListeners()) { continue; } - long elapsed = nowInMillis - sink.lastUpdatedTimeInMillis(); - if (elapsed > keepAlive.millis()) { + long elapsedInMillis = nowInMillis - sink.lastUpdatedTimeInMillis(); + if (elapsedInMillis > keepAlive.millis()) { + TimeValue elapsedTime = TimeValue.timeValueMillis(elapsedInMillis); + logger.debug("removed sink {} inactive for {}", e.getKey(), elapsedTime); finishSinkHandler( e.getKey(), - new ElasticsearchTimeoutException( - "Exchange sink {} has been inactive for {}", - e.getKey(), - TimeValue.timeValueMillis(elapsed) - ) + new ElasticsearchTimeoutException("Exchange sink {} has been inactive for {}", e.getKey(), elapsedTime) ); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionTests.java new file mode 100644 index 0000000000000..11119aade12ff --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.operator.SequenceBooleanBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Comparator; +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MaxBooleanAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceBooleanBlockSourceOperator(blockFactory, IntStream.range(0, size).mapToObj(l -> randomBoolean()).toList()); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MaxBooleanAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "max of booleans"; + } + + @Override + public void assertSimpleOutput(List<Block> input, Block result) { + Boolean max = input.stream().flatMap(b -> allBooleans(b)).max(Comparator.naturalOrder()).get(); + assertThat(((BooleanBlock) result).getBoolean(0), equalTo(max)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionTests.java new file mode 100644 index 0000000000000..74cdca31da34b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.operator.SequenceBooleanBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Comparator; +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MinBooleanAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceBooleanBlockSourceOperator(blockFactory, IntStream.range(0, size).mapToObj(l -> randomBoolean()).toList()); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MinBooleanAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "min of booleans"; + } + + @Override + public void assertSimpleOutput(List<Block> input, Block result) { + Boolean min = input.stream().flatMap(b -> allBooleans(b)).min(Comparator.naturalOrder()).get(); + assertThat(((BooleanBlock) result).getBoolean(0), equalTo(min)); + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index 925b2fb9e5533..e7fa027ff1d6e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -1,4 +1,4 @@ -metaFunctionsSynopsis#[skip:-8.14.99] +metaFunctionsSynopsis#[skip:-8.15.99] meta functions | keep synopsis; synopsis:keyword @@ -38,10 +38,10 @@ double e() "double log(?base:integer|unsigned_long|long|double, number:integer|unsigned_long|long|double)" "double log10(number:double|integer|long|unsigned_long)" "keyword|text ltrim(string:keyword|text)" -"double|integer|long|date max(number:double|integer|long|date)" +"boolean|double|integer|long|date max(field:boolean|double|integer|long|date)" "double|integer|long median(number:double|integer|long)" "double|integer|long median_absolute_deviation(number:double|integer|long)" -"double|integer|long|date min(number:double|integer|long|date)" +"boolean|double|integer|long|date min(field:boolean|double|integer|long|date)" "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_append(field1:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, field2:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version)" "double mv_avg(number:double|integer|long|unsigned_long)" "keyword mv_concat(string:text|keyword, delim:text|keyword)" @@ -116,7 +116,7 @@ double tau() "double weighted_avg(number:double|integer|long, weight:double|integer|long)" ; -metaFunctionsArgs#[skip:-8.14.99] +metaFunctionsArgs#[skip:-8.15.99] META functions | EVAL name = SUBSTRING(name, 0, 14) | KEEP name, argNames, argTypes, argDescriptions; @@ -158,10 +158,10 @@ locate |[string, substring, start] |["keyword|text", "keyword|te log |[base, number] |["integer|unsigned_long|long|double", "integer|unsigned_long|long|double"] |["Base of logarithm. If `null`\, the function returns `null`. If not provided\, this function returns the natural logarithm (base e) of a value.", "Numeric expression.
If `null`\, the function returns `null`."] log10 |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. ltrim |string |"keyword|text" |String expression. If `null`, the function returns `null`. -max |number |"double|integer|long|date" |[""] +max |field |"boolean|double|integer|long|date" |[""] median |number |"double|integer|long" |[""] median_absolut|number |"double|integer|long" |[""] -min |number |"double|integer|long|date" |[""] +min |field |"boolean|double|integer|long|date" |[""] mv_append |[field1, field2] |["boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version", "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version"] | ["", ""] mv_avg |number |"double|integer|long|unsigned_long" |Multivalue expression. mv_concat |[string, delim] |["text|keyword", "text|keyword"] |[Multivalue expression., Delimiter.] @@ -236,7 +236,7 @@ values |field |"boolean|date|double|integer weighted_avg |[number, weight] |["double|integer|long", "double|integer|long"] |[A numeric value., A numeric weight.] ; -metaFunctionsDescription#[skip:-8.14.99] +metaFunctionsDescription#[skip:-8.15.99] META functions | EVAL name = SUBSTRING(name, 0, 14) | KEEP name, description @@ -279,10 +279,10 @@ locate |Returns an integer that indicates the position of a keyword subst log |Returns the logarithm of a value to a base. The input can be any numeric value, the return value is always a double. Logs of zero, negative numbers, and base of one return `null` as well as a warning. log10 |Returns the logarithm of a value to base 10. The input can be any numeric value, the return value is always a double. Logs of 0 and negative numbers return `null` as well as a warning. ltrim |Removes leading whitespaces from a string. -max |The maximum value of a numeric field. +max |The maximum value of a field. median |The value that is greater than half of all values and less than half of all values. median_absolut|The median absolute deviation, a measure of variability. -min |The minimum value of a numeric field. +min |The minimum value of a field. mv_append |Concatenates values of two multi-value fields. mv_avg |Converts a multivalued field into a single valued field containing the average of all of the values. mv_concat |Converts a multivalued string expression into a single valued column containing the concatenation of all values separated by a delimiter. @@ -357,7 +357,7 @@ values |Collect values for a field. weighted_avg |The weighted average of a numeric field. 
; -metaFunctionsRemaining#[skip:-8.14.99] +metaFunctionsRemaining#[skip:-8.15.99] META functions | EVAL name = SUBSTRING(name, 0, 14) | KEEP name, * @@ -401,10 +401,10 @@ locate |integer log |double |[true, false] |false |false log10 |double |false |false |false ltrim |"keyword|text" |false |false |false -max |"double|integer|long|date" |false |false |true +max |"boolean|double|integer|long|date" |false |false |true median |"double|integer|long" |false |false |true median_absolut|"double|integer|long" |false |false |true -min |"double|integer|long|date" |false |false |true +min |"boolean|double|integer|long|date" |false |false |true mv_append |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" |[false, false] |false |false mv_avg |double |false |false |false mv_concat |keyword |[false, false] |false |false @@ -479,7 +479,7 @@ values |"boolean|date|double|integer|ip|keyword|long|text|version" weighted_avg |"double" |[false, false] |false |true ; -metaFunctionsFiltered#[skip:-8.14.99] +metaFunctionsFiltered#[skip:-8.15.99] META FUNCTIONS | WHERE STARTS_WITH(name, "sin") ; @@ -489,9 +489,7 @@ sin |"double sin(angle:double|integer|long|unsigned_long)" |angle sinh |"double sinh(angle:double|integer|long|unsigned_long)" |angle |"double|integer|long|unsigned_long" | "An angle, in radians. If `null`, the function returns `null`." | double | "Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of an angle." | false | false | false ; - -// see https://github.com/elastic/elasticsearch/issues/102120 -countFunctions#[skip:-8.14.99, reason:BIN added] +countFunctions#[skip:-8.15.99] meta functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index e4fc0580e4ba2..2d306cd8fd2a0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -31,6 +31,44 @@ MIN(languages):integer // end::min-result[] ; +maxOfBoolean +required_capability: agg_max_min_boolean_support +from employees | stats s = max(still_hired); + +s:boolean +true +; + +maxOfBooleanExpression +required_capability: agg_max_min_boolean_support +from employees +| eval x = salary is not null +| where emp_no > 10050 +| stats a = max(salary is not null), b = max(x), c = max(case(salary is null, true, false)), d = max(is_rehired); + +a:boolean | b:boolean | c:boolean | d:boolean +true | true | false | true +; + +minOfBooleanExpression +required_capability: agg_max_min_boolean_support +from employees +| eval x = salary is not null +| where emp_no > 10050 +| stats a = min(salary is not null), b = min(x), c = min(case(salary is null, true, false)), d = min(is_rehired); + +a:boolean | b:boolean | c:boolean | d:boolean +true | true | false | false +; + +minOfBoolean +required_capability: agg_max_min_boolean_support +from employees | stats s = min(still_hired); + +s:boolean +false +; + maxOfShort // short becomes int until https://github.com/elastic/elasticsearch-internal/issues/724 from employees | stats l = max(languages.short); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index 5783489195458..349f968666132 100644 --- 
a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -514,6 +514,21 @@ mc:l | count:l 7 | 2 ; +multiIndexTsLongStatsStats +required_capability: union_types +required_capability: union_types_agg_cast + +FROM sample_data, sample_data_ts_long +| EVAL ts = TO_STRING(@timestamp) +| STATS count = COUNT(*) BY ts +| STATS mc = COUNT(count) BY count +| SORT mc DESC, count ASC +; + +mc:l | count:l +14 | 1 +; + multiIndexTsLongRenameStats required_capability: union_types diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java index 22e3de8499bc1..84738f733f86b 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -44,7 +45,11 @@ public void ensureExchangesAreReleased() throws Exception { for (String node : internalCluster().getNodeNames()) { TransportEsqlQueryAction esqlQueryAction = internalCluster().getInstance(TransportEsqlQueryAction.class, node); ExchangeService exchangeService = esqlQueryAction.exchangeService(); - assertBusy(() -> assertTrue("Leftover exchanges " + exchangeService + " on node " + node, exchangeService.isEmpty())); + assertBusy(() -> { + if (exchangeService.lifecycleState() == Lifecycle.State.STARTED) { + assertTrue("Leftover exchanges " + exchangeService + " on node " + node, exchangeService.isEmpty()); + } + }); } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java index da9aa96876fd7..f85de51101af5 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java @@ -54,7 +54,7 @@ protected Collection> nodePlugins() { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return Settings.builder() - .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(500, 2000))) + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000))) .build(); } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java index 800067fef8b1c..df6a1e00b0212 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java +++ 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java @@ -68,7 +68,7 @@ public List> getSettings() { return List.of( Setting.timeSetting( ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, - TimeValue.timeValueMillis(between(1000, 3000)), + TimeValue.timeValueMillis(between(3000, 4000)), Setting.Property.NodeScope ) ); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java index 5be816712cf20..cdfa6eb2d03f3 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java @@ -111,7 +111,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getDefault(Settings.EMPTY) ) - .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(500, 2000))) + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000))) .put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 256))) .put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 1024))) // allow reading pages from network can trip the circuit breaker diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java index 089cb4a9a5084..37833d8aed2d3 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.action; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteResponse; @@ -35,7 +34,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105543") @TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") public class EsqlActionBreakerIT extends EsqlActionIT { @@ -72,7 +70,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getDefault(Settings.EMPTY) ) - .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(500, 2000))) + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000))) .put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 256))) .put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 1024))) // allow reading pages from network can trip the circuit breaker diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java index df1b2c9f00f49..e9eada5def0dc 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java @@ -52,7 +52,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings settings = Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(DEFAULT_SETTINGS) - .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(1000, 2000))) + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000))) .build(); logger.info("settings {}", settings); return settings; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/VerificationException.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/VerificationException.java index 99e4a57757e38..8443b8d99d04a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/VerificationException.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/VerificationException.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.esql; -import org.elasticsearch.xpack.esql.core.common.Failure; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failure; +import org.elasticsearch.xpack.esql.common.Failures; import java.util.Collection; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index fa822b50ffcf5..3353a9352a4bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -49,6 +49,11 @@ public enum Cap { */ AGG_TOP, + /** + * Support for booleans in aggregations {@code MAX} and {@code MIN}. + */ + AGG_MAX_MIN_BOOLEAN_SUPPORT, + /** * Optimization for ST_CENTROID changed some results in cartesian data. #108713 */ @@ -117,7 +122,12 @@ public enum Cap { * Fix to GROK validation in case of multiple fields with same name and different types * https://github.com/elastic/elasticsearch/issues/110533 */ - GROK_VALIDATION; + GROK_VALIDATION, + + /** + * Fix for union-types when aggregating over an inline conversion with conversion function. Done in #110652. 
+ */ + UNION_TYPES_INLINE_FIX; private final boolean snapshotOnly; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 30ffffd4770a9..add1f74cc3f04 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -16,8 +16,8 @@ import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.analysis.AnalyzerRules.BaseAnalyzerRule; import org.elasticsearch.xpack.esql.analysis.AnalyzerRules.ParameterizedAnalyzerRule; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; -import org.elasticsearch.xpack.esql.core.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; @@ -1088,7 +1088,11 @@ protected LogicalPlan doRule(LogicalPlan plan) { // In ResolveRefs the aggregates are resolved from the groupings, which might have an unresolved MultiTypeEsField. // Now that we have resolved those, we need to re-resolve the aggregates. - if (plan instanceof EsqlAggregate agg && agg.expressionsResolved() == false) { + if (plan instanceof EsqlAggregate agg) { + // If the union-types resolution occurred in a child of the aggregate, we need to check the groupings + plan = agg.transformExpressionsOnly(FieldAttribute.class, UnresolveUnionTypes::checkUnresolved); + + // Aggregates where the grouping key comes from a union-type field need to be resolved against the grouping key Map resolved = new HashMap<>(); for (Expression e : agg.groupings()) { Attribute attr = Expressions.attribute(e); @@ -1096,7 +1100,7 @@ protected LogicalPlan doRule(LogicalPlan plan) { resolved.put(attr, e); } } - plan = agg.transformExpressionsOnly(UnresolvedAttribute.class, ua -> resolveAttribute(ua, resolved)); + plan = plan.transformExpressionsOnly(UnresolvedAttribute.class, ua -> resolveAttribute(ua, resolved)); } // Otherwise drop the converted attributes after the alias function, as they are only needed for this function, and @@ -1222,9 +1226,8 @@ protected LogicalPlan rule(LogicalPlan plan) { return plan.transformExpressionsOnly(FieldAttribute.class, UnresolveUnionTypes::checkUnresolved); } - private static Attribute checkUnresolved(FieldAttribute fa) { - var field = fa.field(); - if (field instanceof InvalidMappedField imf) { + static Attribute checkUnresolved(FieldAttribute fa) { + if (fa.field() instanceof InvalidMappedField imf) { String unresolvedMessage = "Cannot use field [" + fa.name() + "] due to ambiguities being " + imf.errorMessage(); return new UnresolvedAttribute(fa.source(), fa.name(), fa.qualifier(), fa.id(), unresolvedMessage, null); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 9b90f411c4eb8..a4e0d99b0d3fc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.xpack.esql.common.Failure; import 
org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; -import org.elasticsearch.xpack.esql.core.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; @@ -53,7 +53,7 @@ import java.util.function.Consumer; import java.util.stream.Stream; -import static org.elasticsearch.xpack.esql.core.common.Failure.fail; +import static org.elasticsearch.xpack.esql.common.Failure.fail; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/Validatable.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/Validatable.java index 4d30f32af5f15..f6733fa3f175c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/Validatable.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/Validatable.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.esql.capabilities; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failures; /** * Interface implemented by expressions that require validation post logical optimization, diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failure.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failure.java similarity index 97% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failure.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failure.java index 719ae7ffbd1ca..e5d0fb7ba0b3d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failure.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failure.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.common; +package org.elasticsearch.xpack.esql.common; import org.elasticsearch.xpack.esql.core.tree.Location; import org.elasticsearch.xpack.esql.core.tree.Node; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failures.java similarity index 96% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failures.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failures.java index c06fe94c9a338..fd25cb427d95b 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failures.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.common; +package org.elasticsearch.xpack.esql.common; import java.util.Collection; import java.util.LinkedHashSet; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Validations.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Validations.java index dffa723a1f3dd..ffcc26cb6f188 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Validations.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Validations.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.esql.expression; -import org.elasticsearch.xpack.esql.core.common.Failure; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expression.TypeResolution; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 44954a1cfea8b..98748fad681c2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -10,10 +10,13 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.MaxBooleanAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -22,16 +25,19 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; +import org.elasticsearch.xpack.esql.planner.ToAggregator; import java.io.IOException; import java.util.List; -public class Max extends NumericAggregate implements SurrogateExpression { +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; + +public class Max extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Max", Max::new); @FunctionInfo( - returnType = { "double", "integer", "long", "date" }, - description = "The maximum value of a numeric field.", + returnType = { "boolean", "double", "integer", "long", "date" }, + description = "The maximum value of a field.", isAggregation = true, examples = { @Example(file = "stats", tag = "max"), @@ -43,7 +49,7 @@ public class Max extends NumericAggregate implements SurrogateExpression { tag = "docsStatsMaxNestedExpression" ) } ) - 
public Max(Source source, @Param(name = "number", type = { "double", "integer", "long", "date" }) Expression field) { + public Max(Source source, @Param(name = "field", type = { "boolean", "double", "integer", "long", "date" }) Expression field) { super(source, field); } @@ -67,8 +73,16 @@ public Max replaceChildren(List<Expression> newChildren) { } @Override - protected boolean supportsDates() { - return true; + protected TypeResolution resolveType() { + return TypeResolutions.isType( + this, + e -> e == DataType.BOOLEAN || e == DataType.DATETIME || (e.isNumeric() && e != DataType.UNSIGNED_LONG), + sourceText(), + DEFAULT, + "boolean", + "datetime", + "numeric except unsigned_long or counter types" + ); } @Override @@ -77,18 +91,21 @@ public DataType dataType() { } @Override - protected AggregatorFunctionSupplier longSupplier(List<Integer> inputChannels) { - return new MaxLongAggregatorFunctionSupplier(inputChannels); - } - - @Override - protected AggregatorFunctionSupplier intSupplier(List<Integer> inputChannels) { - return new MaxIntAggregatorFunctionSupplier(inputChannels); - } - - @Override - protected AggregatorFunctionSupplier doubleSupplier(List<Integer> inputChannels) { - return new MaxDoubleAggregatorFunctionSupplier(inputChannels); + public final AggregatorFunctionSupplier supplier(List<Integer> inputChannels) { + DataType type = field().dataType(); + if (type == DataType.BOOLEAN) { + return new MaxBooleanAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.LONG || type == DataType.DATETIME) { + return new MaxLongAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.INTEGER) { + return new MaxIntAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.DOUBLE) { + return new MaxDoubleAggregatorFunctionSupplier(inputChannels); + } + throw EsqlIllegalArgumentException.illegalDataType(type); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index b9f71d86a6fb1..f712786bcff4b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -10,10 +10,13 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.MinBooleanAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -22,16 +25,19 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.planner.ToAggregator; import java.io.IOException; import
java.util.List; -public class Min extends NumericAggregate implements SurrogateExpression { +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; + +public class Min extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Min", Min::new); @FunctionInfo( - returnType = { "double", "integer", "long", "date" }, - description = "The minimum value of a numeric field.", + returnType = { "boolean", "double", "integer", "long", "date" }, + description = "The minimum value of a field.", isAggregation = true, examples = { @Example(file = "stats", tag = "min"), @@ -43,7 +49,7 @@ public class Min extends NumericAggregate implements SurrogateExpression { tag = "docsStatsMinNestedExpression" ) } ) - public Min(Source source, @Param(name = "number", type = { "double", "integer", "long", "date" }) Expression field) { + public Min(Source source, @Param(name = "field", type = { "boolean", "double", "integer", "long", "date" }) Expression field) { super(source, field); } @@ -67,28 +73,39 @@ public Min replaceChildren(List<Expression> newChildren) { } @Override - public DataType dataType() { - return field().dataType(); - } - - @Override - protected boolean supportsDates() { - return true; + protected TypeResolution resolveType() { + return TypeResolutions.isType( + this, + e -> e == DataType.BOOLEAN || e == DataType.DATETIME || (e.isNumeric() && e != DataType.UNSIGNED_LONG), + sourceText(), + DEFAULT, + "boolean", + "datetime", + "numeric except unsigned_long or counter types" + ); } @Override - protected AggregatorFunctionSupplier longSupplier(List<Integer> inputChannels) { - return new MinLongAggregatorFunctionSupplier(inputChannels); - } - - @Override - protected AggregatorFunctionSupplier intSupplier(List<Integer> inputChannels) { - return new MinIntAggregatorFunctionSupplier(inputChannels); + public DataType dataType() { + return field().dataType(); } @Override - protected AggregatorFunctionSupplier doubleSupplier(List<Integer> inputChannels) { - return new MinDoubleAggregatorFunctionSupplier(inputChannels); + public final AggregatorFunctionSupplier supplier(List<Integer> inputChannels) { + DataType type = field().dataType(); + if (type == DataType.BOOLEAN) { + return new MinBooleanAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.LONG || type == DataType.DATETIME) { + return new MinLongAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.INTEGER) { + return new MinIntAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.DOUBLE) { + return new MinDoubleAggregatorFunctionSupplier(inputChannels); + } + throw EsqlIllegalArgumentException.illegalDataType(type); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java index 40e927404befd..3ce51b8086dd0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java @@ -16,7 +16,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.capabilities.Validatable; -import org.elasticsearch.xpack.esql.core.common.Failures; +import
org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Foldables; import org.elasticsearch.xpack.esql.core.expression.Literal; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java index 199dc49b46097..ee83236ac6a63 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java @@ -30,8 +30,8 @@ import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupeInt; import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupeLong; import org.elasticsearch.xpack.esql.capabilities.Validatable; -import org.elasticsearch.xpack.esql.core.common.Failure; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failure; +import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index 1b40a1c2b02ad..c03dc46216621 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -17,7 +17,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.core.common.Failure; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 284f264b85e1c..50819b8ee7480 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -9,7 +9,7 @@ import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java index 007fb3939db0c..cd61b4eb8892c 100644 --- 
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java
index 007fb3939db0c..cd61b4eb8892c 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.xpack.esql.optimizer;
 
 import org.elasticsearch.xpack.esql.capabilities.Validatable;
-import org.elasticsearch.xpack.esql.core.common.Failures;
+import org.elasticsearch.xpack.esql.common.Failures;
 import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.LogicalPlanDependencyCheck;
 import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
 
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java
index ecd83fbba022c..bff76fb1a706e 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java
@@ -7,7 +7,7 @@
 
 package org.elasticsearch.xpack.esql.optimizer;
 
-import org.elasticsearch.xpack.esql.core.common.Failures;
+import org.elasticsearch.xpack.esql.common.Failures;
 import org.elasticsearch.xpack.esql.core.expression.AttributeSet;
 import org.elasticsearch.xpack.esql.core.expression.Expressions;
 import org.elasticsearch.xpack.esql.core.plan.QueryPlan;
@@ -36,7 +36,7 @@
 import org.elasticsearch.xpack.esql.plan.physical.RowExec;
 import org.elasticsearch.xpack.esql.plan.physical.ShowExec;
 
-import static org.elasticsearch.xpack.esql.core.common.Failure.fail;
+import static org.elasticsearch.xpack.esql.common.Failure.fail;
 
 class OptimizerRules {
 
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java
index 70c2a9007408a..e9fd6a713945c 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.xpack.esql.optimizer;
 
 import org.elasticsearch.xpack.esql.VerificationException;
-import org.elasticsearch.xpack.esql.core.common.Failure;
+import org.elasticsearch.xpack.esql.common.Failure;
 import org.elasticsearch.xpack.esql.core.expression.Alias;
 import org.elasticsearch.xpack.esql.core.expression.Attribute;
 import org.elasticsearch.xpack.esql.core.expression.AttributeMap;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java
index 77c8e7da5d895..7843464650e37 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java
@@ -7,7 +7,7 @@
 
 package org.elasticsearch.xpack.esql.optimizer;
 
-import org.elasticsearch.xpack.esql.core.common.Failure;
+import org.elasticsearch.xpack.esql.common.Failure;
 import org.elasticsearch.xpack.esql.core.expression.Attribute;
 import org.elasticsearch.xpack.esql.core.expression.Expressions;
 import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.PhysicalPlanDependencyCheck;
@@ -18,7 +18,7 @@
 import java.util.LinkedHashSet;
 import java.util.Set;
 
-import static org.elasticsearch.xpack.esql.core.common.Failure.fail;
+import static org.elasticsearch.xpack.esql.common.Failure.fail;
 
 /** Physical plan verifier. */
 public final class PhysicalVerifier {
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java
index e97323f963887..d1e0bdac0bf2f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java
@@ -16,7 +16,7 @@
 import org.elasticsearch.dissect.DissectParser;
 import org.elasticsearch.index.IndexMode;
 import org.elasticsearch.xpack.esql.VerificationException;
-import org.elasticsearch.xpack.esql.core.common.Failure;
+import org.elasticsearch.xpack.esql.common.Failure;
 import org.elasticsearch.xpack.esql.core.expression.Alias;
 import org.elasticsearch.xpack.esql.core.expression.Attribute;
 import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java
index 91433e42033c5..87775d5048752 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java
@@ -146,6 +146,8 @@ private static Stream<Tuple<List<String>, Tuple<String, String>>> typeAndNames(Class<?> clazz) {
         List<String> extraConfigs = List.of("");
         if (NumericAggregate.class.isAssignableFrom(clazz)) {
             types = NUMERIC;
+        } else if (Max.class.isAssignableFrom(clazz) || Min.class.isAssignableFrom(clazz)) {
+            types = List.of("Boolean", "Int", "Long", "Double");
         } else if (clazz == Count.class) {
             types = List.of(""); // no extra type distinction
         } else if (SpatialAggregateFunction.class.isAssignableFrom(clazz)) {
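Note: the type-name list added to AggregateMapper mirrors the convention-based names of the generated suppliers seen in Min above ("Min" plus "Boolean" resolving to MinBooleanAggregatorFunctionSupplier). A hypothetical one-liner sketching that convention; the helper name is illustrative and not part of the mapper:

    // Hypothetical: how an entry in the type list maps to a generated supplier class name.
    static String supplierClassName(String agg, String type) {
        return agg + type + "AggregatorFunctionSupplier"; // e.g. "MinBooleanAggregatorFunctionSupplier"
    }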
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java
index 06191d42c92de..d6cd4a5e84d49 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java
@@ -1832,13 +1832,13 @@ public void testUnsupportedTypesInStats() {
             found value [x] type [unsigned_long]
             line 2:20: argument of [count_distinct(x)] must be [any exact type except unsigned_long or counter types],\
             found value [x] type [unsigned_long]
-            line 2:39: argument of [max(x)] must be [datetime or numeric except unsigned_long or counter types],\
+            line 2:39: argument of [max(x)] must be [boolean, datetime or numeric except unsigned_long or counter types],\
             found value [max(x)] type [unsigned_long]
             line 2:47: argument of [median(x)] must be [numeric except unsigned_long or counter types],\
             found value [x] type [unsigned_long]
             line 2:58: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\
             found value [x] type [unsigned_long]
-            line 2:88: argument of [min(x)] must be [datetime or numeric except unsigned_long or counter types],\
+            line 2:88: argument of [min(x)] must be [boolean, datetime or numeric except unsigned_long or counter types],\
             found value [min(x)] type [unsigned_long]
             line 2:96: first argument of [percentile(x, 10)] must be [numeric except unsigned_long],\
             found value [x] type [unsigned_long]
@@ -1852,13 +1852,13 @@ public void testUnsupportedTypesInStats() {
             Found 7 problems
             line 2:10: argument of [avg(x)] must be [numeric except unsigned_long or counter types],\
             found value [x] type [version]
-            line 2:18: argument of [max(x)] must be [datetime or numeric except unsigned_long or counter types],\
+            line 2:18: argument of [max(x)] must be [boolean, datetime or numeric except unsigned_long or counter types],\
             found value [max(x)] type [version]
             line 2:26: argument of [median(x)] must be [numeric except unsigned_long or counter types],\
             found value [x] type [version]
             line 2:37: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\
             found value [x] type [version]
-            line 2:67: argument of [min(x)] must be [datetime or numeric except unsigned_long or counter types],\
+            line 2:67: argument of [min(x)] must be [boolean, datetime or numeric except unsigned_long or counter types],\
             found value [min(x)] type [version]
             line 2:75: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], found value [x] type [version]
             line 2:94: argument of [sum(x)] must be [numeric except unsigned_long or counter types], found value [x] type [version]""");
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java
index ad08130c5b0d9..00d12240e67e5 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java
@@ -493,7 +493,8 @@ public void testAggregateOnCounter() {
         assertThat(
             error("FROM tests | STATS min(network.bytes_in)", tsdb),
             equalTo(
-                "1:20: argument of [min(network.bytes_in)] must be [datetime or numeric except unsigned_long or counter types],"
+                "1:20: argument of [min(network.bytes_in)] must be"
+                    + " [boolean, datetime or numeric except unsigned_long or counter types],"
                     + " found value [min(network.bytes_in)] type [counter_long]"
             )
         );
@@ -501,7 +502,8 @@ public void testAggregateOnCounter() {
         assertThat(
             error("FROM tests | STATS max(network.bytes_in)", tsdb),
             equalTo(
-                "1:20: argument of [max(network.bytes_in)] must be [datetime or numeric except unsigned_long or counter types],"
+                "1:20: argument of [max(network.bytes_in)] must be"
+                    + " [boolean, datetime or numeric except unsigned_long or counter types],"
                     + " found value [max(network.bytes_in)] type [counter_long]"
             )
         );
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java
index 5621e63061e15..68f5414302c9d 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java
@@ -278,4 +278,26 @@ public static List<TypedDataSupplier> dateCases(int minRows, int maxRows) {
 
         return cases;
     }
+
+    public static List<TypedDataSupplier> booleanCases(int minRows, int maxRows) {
+        List<TypedDataSupplier> cases = new ArrayList<>();
+
+        cases.add(new TypedDataSupplier("<true booleans>", () -> randomList(minRows, maxRows, () -> true), DataType.BOOLEAN, false, true));
+
+        cases.add(
+            new TypedDataSupplier("<false booleans>", () -> randomList(minRows, maxRows, () -> false), DataType.BOOLEAN, false, true)
+        );
+
+        cases.add(
+            new TypedDataSupplier(
+                "<random booleans>",
+                () -> randomList(minRows, maxRows, ESTestCase::randomBoolean),
+                DataType.BOOLEAN,
+                false,
+                true
+            )
+        );
+
+        return cases;
+    }
 }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java
index ddff3bc3a8138..3fddaff226f3e 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java
@@ -39,7 +39,8 @@ public static Iterable<Object[]> parameters() {
             MultiRowTestCaseSupplier.intCases(1, 1000, Integer.MIN_VALUE, Integer.MAX_VALUE, true),
             MultiRowTestCaseSupplier.longCases(1, 1000, Long.MIN_VALUE, Long.MAX_VALUE, true),
             MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true),
-            MultiRowTestCaseSupplier.dateCases(1, 1000)
+            MultiRowTestCaseSupplier.dateCases(1, 1000),
+            MultiRowTestCaseSupplier.booleanCases(1, 1000)
         ).flatMap(List::stream).map(MaxTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers));
 
         suppliers.addAll(
@@ -81,6 +82,15 @@ public static Iterable<Object[]> parameters() {
                         equalTo(200L)
                     )
                 ),
+                new TestCaseSupplier(
+                    List.of(DataType.BOOLEAN),
+                    () -> new TestCaseSupplier.TestCase(
+                        List.of(TestCaseSupplier.TypedData.multiRow(List.of(true, false, false, true), DataType.BOOLEAN, "field")),
+                        "Max[field=Attribute[channel=0]]",
+                        DataType.BOOLEAN,
+                        equalTo(true)
+                    )
+                ),
 
                 // Folding
                 new TestCaseSupplier(
@@ -118,6 +128,15 @@ public static Iterable<Object[]> parameters() {
                         DataType.DATETIME,
                         equalTo(200L)
                     )
+                ),
+                new TestCaseSupplier(
+                    List.of(DataType.BOOLEAN),
+                    () -> new TestCaseSupplier.TestCase(
+                        List.of(TestCaseSupplier.TypedData.multiRow(List.of(true), DataType.BOOLEAN, "field")),
+                        "Max[field=Attribute[channel=0]]",
+                        DataType.BOOLEAN,
+                        equalTo(true)
+                    )
                 )
             )
         );
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java
index fdacf448d52a0..6f59928059bec 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java
@@ -39,7 +39,8 @@ public static Iterable<Object[]> parameters() {
             MultiRowTestCaseSupplier.intCases(1, 1000, Integer.MIN_VALUE, Integer.MAX_VALUE, true),
             MultiRowTestCaseSupplier.longCases(1, 1000, Long.MIN_VALUE, Long.MAX_VALUE, true),
             MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true),
-            MultiRowTestCaseSupplier.dateCases(1, 1000)
+            MultiRowTestCaseSupplier.dateCases(1, 1000),
+            MultiRowTestCaseSupplier.booleanCases(1, 1000)
         ).flatMap(List::stream).map(MinTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers));
 
         suppliers.addAll(
@@ -81,6 +82,15 @@ public static Iterable<Object[]> parameters() {
                         equalTo(0L)
                     )
                 ),
+                new TestCaseSupplier(
+                    List.of(DataType.BOOLEAN),
+                    () -> new TestCaseSupplier.TestCase(
+                        List.of(TestCaseSupplier.TypedData.multiRow(List.of(true, false, false, true), DataType.BOOLEAN, "field")),
+                        "Min[field=Attribute[channel=0]]",
+                        DataType.BOOLEAN,
+                        equalTo(false)
+                    )
+                ),
 
                 // Folding
                 new TestCaseSupplier(
@@ -118,6 +128,15 @@ public static Iterable<Object[]> parameters() {
                         DataType.DATETIME,
                         equalTo(200L)
                     )
+                ),
+                new TestCaseSupplier(
+                    List.of(DataType.BOOLEAN),
+                    () -> new TestCaseSupplier.TestCase(
+                        List.of(TestCaseSupplier.TypedData.multiRow(List.of(true), DataType.BOOLEAN, "field")),
+                        "Min[field=Attribute[channel=0]]",
+                        DataType.BOOLEAN,
+                        equalTo(true)
+                    )
                 )
             )
         );
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java
index b7b7e7ce84756..c0c23ce29301e 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java
@@ -241,7 +241,7 @@ private static TestCaseSupplier makeSupplier(
             ),
             "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]",
             fieldSupplier.type(),
-            equalTo(expected)
+            equalTo(expected.size() == 1 ? expected.get(0) : expected)
         );
     });
 }
"endpoints" : "models"; assumeTrue("Azure OpenAI embedding service added in " + OPEN_AI_AZURE_EMBEDDINGS_ADDED, openAiEmbeddingsSupported); final String oldClusterId = "old-cluster-embeddings"; final String upgradedClusterId = "upgraded-cluster-embeddings"; + var testTaskType = TaskType.TEXT_EMBEDDING; + if (isOldCluster()) { // queue a response as PUT will call the service openAiEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(OpenAiServiceUpgradeIT.embeddingResponse())); - put(oldClusterId, embeddingConfig(getUrl(openAiEmbeddingsServer)), TaskType.TEXT_EMBEDDING); + put(oldClusterId, embeddingConfig(getUrl(openAiEmbeddingsServer)), testTaskType); - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get(oldClusterEndpointIdentifier); assertThat(configs, hasSize(1)); } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId); assertEquals("azureopenai", configs.get(0).get("service")); assertEmbeddingInference(oldClusterId); } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); var serviceSettings = (Map) configs.get(0).get("service_settings"); // Inference on old cluster model assertEmbeddingInference(oldClusterId); openAiEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(OpenAiServiceUpgradeIT.embeddingResponse())); - put(upgradedClusterId, embeddingConfig(getUrl(openAiEmbeddingsServer)), TaskType.TEXT_EMBEDDING); + put(upgradedClusterId, embeddingConfig(getUrl(openAiEmbeddingsServer)), testTaskType); - configs = (List>) get(TaskType.TEXT_EMBEDDING, upgradedClusterId).get("endpoints"); + configs = (List>) get(testTaskType, upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); // Inference on the new config diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java index c889d8f9b312a..c7d95f1f512b2 100644 --- a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java +++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java @@ -15,6 +15,8 @@ import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingType; import org.hamcrest.Matchers; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.io.IOException; import java.util.List; @@ -39,7 +41,7 @@ public CohereServiceUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { super(upgradedNodes); } - // @BeforeClass + @BeforeClass public static void startWebServer() throws IOException { cohereEmbeddingsServer = new MockWebServer(); cohereEmbeddingsServer.start(); @@ -48,58 +50,74 @@ public static void startWebServer() throws IOException { cohereRerankServer.start(); } - // @AfterClass // for the awaitsfix + @AfterClass public static void shutdown() { cohereEmbeddingsServer.close(); cohereRerankServer.close(); } @SuppressWarnings("unchecked") - 
diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java
index c889d8f9b312a..c7d95f1f512b2 100644
--- a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java
+++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java
@@ -15,6 +15,8 @@
 import org.elasticsearch.test.http.MockWebServer;
 import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingType;
 import org.hamcrest.Matchers;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 
 import java.io.IOException;
 import java.util.List;
@@ -39,7 +41,7 @@ public CohereServiceUpgradeIT(@Name("upgradedNodes") int upgradedNodes) {
         super(upgradedNodes);
     }
 
-    // @BeforeClass
+    @BeforeClass
     public static void startWebServer() throws IOException {
         cohereEmbeddingsServer = new MockWebServer();
         cohereEmbeddingsServer.start();
@@ -48,58 +50,74 @@ public static void startWebServer() throws IOException {
         cohereRerankServer.start();
     }
 
-    // @AfterClass // for the awaitsfix
+    @AfterClass
     public static void shutdown() {
         cohereEmbeddingsServer.close();
         cohereRerankServer.close();
     }
 
     @SuppressWarnings("unchecked")
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887")
     public void testCohereEmbeddings() throws IOException {
         var embeddingsSupported = getOldClusterTestVersion().onOrAfter(COHERE_EMBEDDINGS_ADDED);
+        // `gte_v` indicates that the cluster version is Greater Than or Equal to MODELS_RENAMED_TO_ENDPOINTS
+        String oldClusterEndpointIdentifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? "endpoints" : "models";
         assumeTrue("Cohere embedding service added in " + COHERE_EMBEDDINGS_ADDED, embeddingsSupported);
 
         final String oldClusterIdInt8 = "old-cluster-embeddings-int8";
         final String oldClusterIdFloat = "old-cluster-embeddings-float";
 
+        var testTaskType = TaskType.TEXT_EMBEDDING;
+
         if (isOldCluster()) {
             // queue a response as PUT will call the service
             cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseByte()));
-            put(oldClusterIdInt8, embeddingConfigInt8(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING);
+            put(oldClusterIdInt8, embeddingConfigInt8(getUrl(cohereEmbeddingsServer)), testTaskType);
             // float model
             cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseFloat()));
-            put(oldClusterIdFloat, embeddingConfigFloat(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING);
+            put(oldClusterIdFloat, embeddingConfigFloat(getUrl(cohereEmbeddingsServer)), testTaskType);
 
-            var configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, oldClusterIdInt8).get("endpoints");
-            assertThat(configs, hasSize(1));
-            assertEquals("cohere", configs.get(0).get("service"));
-            var serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
-            assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0"));
-            var embeddingType = serviceSettings.get("embedding_type");
-            // An upgraded node will report the embedding type as byte, the old node int8
-            assertThat(embeddingType, Matchers.is(oneOf("int8", "byte")));
-
-            assertEmbeddingInference(oldClusterIdInt8, CohereEmbeddingType.BYTE);
-            assertEmbeddingInference(oldClusterIdFloat, CohereEmbeddingType.FLOAT);
+            {
+                var configs = (List<Map<String, Object>>) get(testTaskType, oldClusterIdInt8).get(oldClusterEndpointIdentifier);
+                assertThat(configs, hasSize(1));
+                assertEquals("cohere", configs.get(0).get("service"));
+                var serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
+                assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0"));
+                var embeddingType = serviceSettings.get("embedding_type");
+                // An upgraded node will report the embedding type as byte, the old node int8
+                assertThat(embeddingType, Matchers.is(oneOf("int8", "byte")));
+                assertEmbeddingInference(oldClusterIdInt8, CohereEmbeddingType.BYTE);
+            }
+            {
+                var configs = (List<Map<String, Object>>) get(testTaskType, oldClusterIdFloat).get(oldClusterEndpointIdentifier);
+                assertThat(configs, hasSize(1));
+                assertEquals("cohere", configs.get(0).get("service"));
+                var serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
+                assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0"));
+                assertThat(serviceSettings, hasEntry("embedding_type", "float"));
+                assertEmbeddingInference(oldClusterIdFloat, CohereEmbeddingType.FLOAT);
+            }
         } else if (isMixedCluster()) {
-            var configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, oldClusterIdInt8).get("endpoints");
-            assertEquals("cohere", configs.get(0).get("service"));
-            var serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
-            assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0"));
-            var embeddingType = serviceSettings.get("embedding_type");
-            // An upgraded node will report the embedding type as byte, an old node int8
-            assertThat(embeddingType, Matchers.is(oneOf("int8", "byte")));
-
-            configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, oldClusterIdFloat).get("endpoints");
-            serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
-            assertThat(serviceSettings, hasEntry("embedding_type", "float"));
-
-            assertEmbeddingInference(oldClusterIdInt8, CohereEmbeddingType.BYTE);
-            assertEmbeddingInference(oldClusterIdFloat, CohereEmbeddingType.FLOAT);
+            {
+                var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterIdInt8);
+                assertEquals("cohere", configs.get(0).get("service"));
+                var serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
+                assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0"));
+                var embeddingType = serviceSettings.get("embedding_type");
+                // An upgraded node will report the embedding type as byte, an old node int8
+                assertThat(embeddingType, Matchers.is(oneOf("int8", "byte")));
+                assertEmbeddingInference(oldClusterIdInt8, CohereEmbeddingType.BYTE);
+            }
+            {
+                var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterIdFloat);
+                assertEquals("cohere", configs.get(0).get("service"));
+                var serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
+                assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0"));
+                assertThat(serviceSettings, hasEntry("embedding_type", "float"));
+                assertEmbeddingInference(oldClusterIdFloat, CohereEmbeddingType.FLOAT);
+            }
         } else if (isUpgradedCluster()) {
             // check old cluster model
-            var configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, oldClusterIdInt8).get("endpoints");
+            var configs = (List<Map<String, Object>>) get(testTaskType, oldClusterIdInt8).get("endpoints");
             var serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
             assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0"));
             assertThat(serviceSettings, hasEntry("embedding_type", "byte"));
@@ -114,9 +132,9 @@ public void testCohereEmbeddings() throws IOException {
             final String upgradedClusterIdByte = "upgraded-cluster-embeddings-byte";
 
             cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseByte()));
-            put(upgradedClusterIdByte, embeddingConfigByte(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING);
+            put(upgradedClusterIdByte, embeddingConfigByte(getUrl(cohereEmbeddingsServer)), testTaskType);
 
-            configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, upgradedClusterIdByte).get("endpoints");
+            configs = (List<Map<String, Object>>) get(testTaskType, upgradedClusterIdByte).get("endpoints");
             serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
             assertThat(serviceSettings, hasEntry("embedding_type", "byte"));
 
@@ -127,9 +145,9 @@ public void testCohereEmbeddings() throws IOException {
             final String upgradedClusterIdInt8 = "upgraded-cluster-embeddings-int8";
 
             cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseByte()));
-            put(upgradedClusterIdInt8, embeddingConfigInt8(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING);
+            put(upgradedClusterIdInt8, embeddingConfigInt8(getUrl(cohereEmbeddingsServer)), testTaskType);
 
-            configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, upgradedClusterIdInt8).get("endpoints");
+            configs = (List<Map<String, Object>>) get(testTaskType, upgradedClusterIdInt8).get("endpoints");
             serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
             assertThat(serviceSettings, hasEntry("embedding_type", "byte")); // int8 rewritten to byte
@@ -139,9 +157,9 @@ public void testCohereEmbeddings() throws IOException {
         {
             final String upgradedClusterIdFloat = "upgraded-cluster-embeddings-float";
             cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseFloat()));
-            put(upgradedClusterIdFloat, embeddingConfigFloat(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING);
+            put(upgradedClusterIdFloat, embeddingConfigFloat(getUrl(cohereEmbeddingsServer)), testTaskType);
 
-            configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, upgradedClusterIdFloat).get("endpoints");
+            configs = (List<Map<String, Object>>) get(testTaskType, upgradedClusterIdFloat).get("endpoints");
             serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
             assertThat(serviceSettings, hasEntry("embedding_type", "float"));
 
@@ -169,22 +187,25 @@ void assertEmbeddingInference(String inferenceId, CohereEmbeddingType type) throws IOException {
     }
 
     @SuppressWarnings("unchecked")
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887")
     public void testRerank() throws IOException {
         var rerankSupported = getOldClusterTestVersion().onOrAfter(COHERE_RERANK_ADDED);
+        String old_cluster_endpoint_identifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? "endpoints" : "models";
         assumeTrue("Cohere rerank service added in " + COHERE_RERANK_ADDED, rerankSupported);
 
         final String oldClusterId = "old-cluster-rerank";
         final String upgradedClusterId = "upgraded-cluster-rerank";
 
+        var testTaskType = TaskType.RERANK;
+
         if (isOldCluster()) {
-            put(oldClusterId, rerankConfig(getUrl(cohereRerankServer)), TaskType.RERANK);
-            var configs = (List<Map<String, Object>>) get(TaskType.RERANK, oldClusterId).get("endpoints");
+            put(oldClusterId, rerankConfig(getUrl(cohereRerankServer)), testTaskType);
+            var configs = (List<Map<String, Object>>) get(testTaskType, oldClusterId).get(old_cluster_endpoint_identifier);
             assertThat(configs, hasSize(1));
 
             assertRerank(oldClusterId);
         } else if (isMixedCluster()) {
-            var configs = (List<Map<String, Object>>) get(TaskType.RERANK, oldClusterId).get("endpoints");
+            var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId);
+
             assertEquals("cohere", configs.get(0).get("service"));
             var serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
             assertThat(serviceSettings, hasEntry("model_id", "rerank-english-v3.0"));
@@ -195,7 +216,7 @@ public void testRerank() throws IOException {
 
         } else if (isUpgradedCluster()) {
             // check old cluster model
-            var configs = (List<Map<String, Object>>) get(TaskType.RERANK, oldClusterId).get("endpoints");
+            var configs = (List<Map<String, Object>>) get(testTaskType, oldClusterId).get("endpoints");
             assertEquals("cohere", configs.get(0).get("service"));
             var serviceSettings = (Map<String, Object>) configs.get(0).get("service_settings");
             assertThat(serviceSettings, hasEntry("model_id", "rerank-english-v3.0"));
@@ -205,7 +226,7 @@ public void testRerank() throws IOException {
             assertRerank(oldClusterId);
 
             // New endpoint
-            put(upgradedClusterId, rerankConfig(getUrl(cohereRerankServer)), TaskType.RERANK);
+            put(upgradedClusterId, rerankConfig(getUrl(cohereRerankServer)), testTaskType);
 
             configs = (List<Map<String, Object>>) get(upgradedClusterId).get("endpoints");
             assertThat(configs, hasSize(1));
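Note: the int8/byte duality tolerated by the assertions above comes from newer nodes normalizing the requested Cohere embedding element type: "int8" is accepted as input but stored and reported back as the canonical "byte". A sketch of that normalization, with an illustrative method name rather than the service's actual code:

    // "int8" is accepted on input but rewritten to the canonical "byte" element type,
    // which is why a mixed cluster may report either spelling for the same endpoint.
    static String canonicalElementType(String requested) {
        return "int8".equals(requested) ? "byte" : requested;
    }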
diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/HuggingFaceServiceUpgradeIT.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/HuggingFaceServiceUpgradeIT.java
index 899a02776195d..36ee472cc0a13 100644
--- a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/HuggingFaceServiceUpgradeIT.java
+++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/HuggingFaceServiceUpgradeIT.java
@@ -13,6 +13,8 @@
 import org.elasticsearch.inference.TaskType;
 import org.elasticsearch.test.http.MockResponse;
 import org.elasticsearch.test.http.MockWebServer;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 
 import java.io.IOException;
 import java.util.List;
@@ -34,7 +36,7 @@ public HuggingFaceServiceUpgradeIT(@Name("upgradedNodes") int upgradedNodes) {
         super(upgradedNodes);
     }
 
-    // @BeforeClass
+    @BeforeClass
     public static void startWebServer() throws IOException {
         embeddingsServer = new MockWebServer();
         embeddingsServer.start();
@@ -43,47 +45,51 @@ public static void startWebServer() throws IOException {
         elserServer.start();
     }
 
-    // @AfterClass for the awaits fix
+    @AfterClass
     public static void shutdown() {
         embeddingsServer.close();
         elserServer.close();
     }
 
     @SuppressWarnings("unchecked")
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887")
     public void testHFEmbeddings() throws IOException {
         var embeddingsSupported = getOldClusterTestVersion().onOrAfter(HF_EMBEDDINGS_ADDED);
+        // `gte_v` indicates that the cluster version is Greater Than or Equal to MODELS_RENAMED_TO_ENDPOINTS
+        String oldClusterEndpointIdentifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? "endpoints" : "models";
        assumeTrue("Hugging Face embedding service added in " + HF_EMBEDDINGS_ADDED, embeddingsSupported);
 
         final String oldClusterId = "old-cluster-embeddings";
         final String upgradedClusterId = "upgraded-cluster-embeddings";
 
+        var testTaskType = TaskType.TEXT_EMBEDDING;
+
         if (isOldCluster()) {
             // queue a response as PUT will call the service
             embeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponse()));
-            put(oldClusterId, embeddingConfig(getUrl(embeddingsServer)), TaskType.TEXT_EMBEDDING);
+            put(oldClusterId, embeddingConfig(getUrl(embeddingsServer)), testTaskType);
 
-            var configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints");
+            var configs = (List<Map<String, Object>>) get(testTaskType, oldClusterId).get(oldClusterEndpointIdentifier);
             assertThat(configs, hasSize(1));
 
             assertEmbeddingInference(oldClusterId);
         } else if (isMixedCluster()) {
-            var configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints");
+            var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId);
+
             assertEquals("hugging_face", configs.get(0).get("service"));
 
             assertEmbeddingInference(oldClusterId);
         } else if (isUpgradedCluster()) {
             // check old cluster model
-            var configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints");
+            var configs = (List<Map<String, Object>>) get(testTaskType, oldClusterId).get("endpoints");
             assertEquals("hugging_face", configs.get(0).get("service"));
 
             // Inference on old cluster model
             assertEmbeddingInference(oldClusterId);
 
             embeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponse()));
-            put(upgradedClusterId, embeddingConfig(getUrl(embeddingsServer)), TaskType.TEXT_EMBEDDING);
+            put(upgradedClusterId, embeddingConfig(getUrl(embeddingsServer)), testTaskType);
 
-            configs = (List<Map<String, Object>>) get(TaskType.TEXT_EMBEDDING, upgradedClusterId).get("endpoints");
+            configs = (List<Map<String, Object>>) get(testTaskType, upgradedClusterId).get("endpoints");
             assertThat(configs, hasSize(1));
 
             assertEmbeddingInference(upgradedClusterId);
@@ -100,27 +106,29 @@ void assertEmbeddingInference(String inferenceId) throws IOException {
     }
 
     @SuppressWarnings("unchecked")
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887")
     public void testElser() throws IOException {
         var supported = getOldClusterTestVersion().onOrAfter(HF_ELSER_ADDED);
+        String old_cluster_endpoint_identifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? "endpoints" : "models";
         assumeTrue("HF elser service added in " + HF_ELSER_ADDED, supported);
 
         final String oldClusterId = "old-cluster-elser";
         final String upgradedClusterId = "upgraded-cluster-elser";
 
+        var testTaskType = TaskType.SPARSE_EMBEDDING;
+
         if (isOldCluster()) {
-            put(oldClusterId, elserConfig(getUrl(elserServer)), TaskType.SPARSE_EMBEDDING);
-            var configs = (List<Map<String, Object>>) get(TaskType.SPARSE_EMBEDDING, oldClusterId).get("endpoints");
+            put(oldClusterId, elserConfig(getUrl(elserServer)), testTaskType);
+            var configs = (List<Map<String, Object>>) get(testTaskType, oldClusterId).get(old_cluster_endpoint_identifier);
             assertThat(configs, hasSize(1));
 
             assertElser(oldClusterId);
         } else if (isMixedCluster()) {
-            var configs = (List<Map<String, Object>>) get(TaskType.SPARSE_EMBEDDING, oldClusterId).get("endpoints");
+            var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId);
             assertEquals("hugging_face", configs.get(0).get("service"));
 
             assertElser(oldClusterId);
         } else if (isUpgradedCluster()) {
             // check old cluster model
-            var configs = (List<Map<String, Object>>) get(TaskType.SPARSE_EMBEDDING, oldClusterId).get("endpoints");
+            var configs = (List<Map<String, Object>>) get(testTaskType, oldClusterId).get("endpoints");
             assertEquals("hugging_face", configs.get(0).get("service"));
             var taskSettings = (Map<String, Object>) configs.get(0).get("task_settings");
             assertThat(taskSettings.keySet(), empty());
@@ -128,7 +136,7 @@ public void testElser() throws IOException {
             assertElser(oldClusterId);
 
             // New endpoint
-            put(upgradedClusterId, elserConfig(getUrl(elserServer)), TaskType.SPARSE_EMBEDDING);
+            put(upgradedClusterId, elserConfig(getUrl(elserServer)), testTaskType);
 
             configs = (List<Map<String, Object>>) get(upgradedClusterId).get("endpoints");
             assertThat(configs, hasSize(1));
diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/InferenceUpgradeTestCase.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/InferenceUpgradeTestCase.java
index ecfec2304c8a1..58335eb53b366 100644
--- a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/InferenceUpgradeTestCase.java
+++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/InferenceUpgradeTestCase.java
@@ -16,13 +16,17 @@
 import org.elasticsearch.upgrades.AbstractRollingUpgradeTestCase;
 
 import java.io.IOException;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 import static org.elasticsearch.core.Strings.format;
 
 public class InferenceUpgradeTestCase extends AbstractRollingUpgradeTestCase {
 
+    static final String MODELS_RENAMED_TO_ENDPOINTS = "8.15.0";
+
     public InferenceUpgradeTestCase(@Name("upgradedNodes") int upgradedNodes) {
         super(upgradedNodes);
     }
@@ -104,4 +108,17 @@ protected void put(String inferenceId, String modelConfig, TaskType taskType) throws IOException {
         var response = client().performRequest(request);
         assertOKAndConsume(response);
     }
+
+    @SuppressWarnings("unchecked")
+    // in version 8.15, there was a breaking change where "models" was renamed to "endpoints"
+    LinkedList<Map<String, Object>> getConfigsWithBreakingChangeHandling(TaskType testTaskType, String oldClusterId) throws IOException {
+
+        LinkedList<Map<String, Object>> configs;
+        configs = new LinkedList<>(
+            (List<Map<String, Object>>) Objects.requireNonNullElse((get(testTaskType, oldClusterId).get("endpoints")), List.of())
+        );
+        configs.addAll(Objects.requireNonNullElse((List<Map<String, Object>>) get(testTaskType, oldClusterId).get("models"), List.of()));
+
+        return configs;
+    }
 }
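Note: a hedged usage sketch of the helper above, as a mixed-cluster branch might call it; the inference id is hypothetical. Reading both keys means the test passes whether the old node answers with the pre-8.15 "models" key or the renamed "endpoints" key:

    if (isMixedCluster()) {
        // Tolerates both response shapes across the 8.15 "models" -> "endpoints" rename.
        var configs = getConfigsWithBreakingChangeHandling(TaskType.TEXT_EMBEDDING, "my-old-endpoint");
        assertThat(configs, hasSize(1));
    }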
"endpoints" : "models"; assumeTrue("OpenAI embedding service added in " + OPEN_AI_EMBEDDINGS_ADDED, openAiEmbeddingsSupported); final String oldClusterId = "old-cluster-embeddings"; final String upgradedClusterId = "upgraded-cluster-embeddings"; + var testTaskType = TaskType.TEXT_EMBEDDING; + if (isOldCluster()) { String inferenceConfig = oldClusterVersionCompatibleEmbeddingConfig(); // queue a response as PUT will call the service openAiEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponse())); - put(oldClusterId, inferenceConfig, TaskType.TEXT_EMBEDDING); + put(oldClusterId, inferenceConfig, testTaskType); - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get(oldClusterEndpointIdentifier); assertThat(configs, hasSize(1)); assertEmbeddingInference(oldClusterId); } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId); + assertEquals("openai", configs.get(0).get("service")); var serviceSettings = (Map) configs.get(0).get("service_settings"); var taskSettings = (Map) configs.get(0).get("task_settings"); @@ -80,7 +86,7 @@ public void testOpenAiEmbeddings() throws IOException { assertEmbeddingInference(oldClusterId); } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); var serviceSettings = (Map) configs.get(0).get("service_settings"); // model id is moved to service settings assertThat(serviceSettings, hasEntry("model_id", "text-embedding-ada-002")); @@ -92,9 +98,9 @@ public void testOpenAiEmbeddings() throws IOException { String inferenceConfig = embeddingConfigWithModelInServiceSettings(getUrl(openAiEmbeddingsServer)); openAiEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponse())); - put(upgradedClusterId, inferenceConfig, TaskType.TEXT_EMBEDDING); + put(upgradedClusterId, inferenceConfig, testTaskType); - configs = (List>) get(TaskType.TEXT_EMBEDDING, upgradedClusterId).get("endpoints"); + configs = (List>) get(testTaskType, upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); assertEmbeddingInference(upgradedClusterId); @@ -111,23 +117,29 @@ void assertEmbeddingInference(String inferenceId) throws IOException { } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887") public void testOpenAiCompletions() throws IOException { var openAiEmbeddingsSupported = getOldClusterTestVersion().onOrAfter(OPEN_AI_COMPLETIONS_ADDED); + String old_cluster_endpoint_identifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? 
"endpoints" : "models"; assumeTrue("OpenAI completions service added in " + OPEN_AI_COMPLETIONS_ADDED, openAiEmbeddingsSupported); final String oldClusterId = "old-cluster-completions"; final String upgradedClusterId = "upgraded-cluster-completions"; + var testTaskType = TaskType.COMPLETION; + if (isOldCluster()) { - put(oldClusterId, chatCompletionsConfig(getUrl(openAiChatCompletionsServer)), TaskType.COMPLETION); + put(oldClusterId, chatCompletionsConfig(getUrl(openAiChatCompletionsServer)), testTaskType); - var configs = (List>) get(TaskType.COMPLETION, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get(old_cluster_endpoint_identifier); assertThat(configs, hasSize(1)); assertCompletionInference(oldClusterId); } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.COMPLETION, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); + if (oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) == false) { + configs.addAll((List>) get(testTaskType, oldClusterId).get(old_cluster_endpoint_identifier)); + // in version 8.15, there was a breaking change where "models" was renamed to "endpoints" + } assertEquals("openai", configs.get(0).get("service")); var serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("model_id", "gpt-4")); @@ -137,7 +149,7 @@ public void testOpenAiCompletions() throws IOException { assertCompletionInference(oldClusterId); } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.COMPLETION, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); var serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("model_id", "gpt-4")); var taskSettings = (Map) configs.get(0).get("task_settings"); @@ -145,8 +157,8 @@ public void testOpenAiCompletions() throws IOException { assertCompletionInference(oldClusterId); - put(upgradedClusterId, chatCompletionsConfig(getUrl(openAiChatCompletionsServer)), TaskType.COMPLETION); - configs = (List>) get(TaskType.COMPLETION, upgradedClusterId).get("endpoints"); + put(upgradedClusterId, chatCompletionsConfig(getUrl(openAiChatCompletionsServer)), testTaskType); + configs = (List>) get(testTaskType, upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); // Inference on the new config diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java index 7f71933676ee0..c5bb536833e89 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java @@ -161,5 +161,5 @@ private static RankedDocsResults.RankedDoc parseRankedDocObject(XContentParser p private CohereRankedResponseEntity() {} - static String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Cohere embeddings response"; + static String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Cohere rerank response"; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java 
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java
index 99779ac378d89..9f810b829bea9 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java
@@ -23,6 +23,7 @@
 import org.elasticsearch.xpack.core.inference.action.InferenceAction;
 import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults;
 import org.elasticsearch.xpack.core.inference.results.TextEmbedding;
+import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag;
 import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings;
 import org.elasticsearch.xpack.inference.services.settings.ApiKeySecrets;
 
@@ -131,6 +132,9 @@ public static Object removeAsOneOfTypes(
     }
 
     public static AdaptiveAllocationsSettings removeAsAdaptiveAllocationsSettings(Map<String, Object> sourceMap, String key) {
+        if (AdaptiveAllocationsFeatureFlag.isEnabled() == false) {
+            return null;
+        }
         Map<String, Object> settingsMap = ServiceUtils.removeFromMap(sourceMap, key);
         return settingsMap == null
             ? null
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java
index 8cb9305edd057..7fbfe70dbcfe7 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java
@@ -151,7 +151,6 @@ public void testRerankInferenceFailure() {
         );
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/110398")
     public void testRerankInferenceResultMismatch() {
         ElasticsearchAssertions.assertFailures(
             // Execute search with text similarity reranking
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java
index 346b67a169912..a1664b7023fc0 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.core.ml.annotations.AnnotationIndex;
+import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag;
 import org.elasticsearch.xpack.ml.inference.adaptiveallocations.AdaptiveAllocationsScalerService;
 import org.elasticsearch.xpack.ml.notifications.InferenceAuditor;
 
@@ -123,13 +124,17 @@ public void beforeStop() {
 
     public void onMaster() {
         mlDailyMaintenanceService.start();
-        adaptiveAllocationsScalerService.start();
+        if (AdaptiveAllocationsFeatureFlag.isEnabled()) {
+            adaptiveAllocationsScalerService.start();
+        }
         threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(this::makeMlInternalIndicesHidden);
     }
 
     public void offMaster() {
         mlDailyMaintenanceService.stop();
-        adaptiveAllocationsScalerService.stop();
+        if (AdaptiveAllocationsFeatureFlag.isEnabled()) {
+            adaptiveAllocationsScalerService.stop();
+        }
     }
 
     @Override
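Note: the ML and inference changes above follow the usual feature-flag gating pattern: settings parsing short-circuits to null and both start() and stop() are wrapped in the same isEnabled() check, so the scaler's lifecycle stays symmetric when the flag is off. A minimal sketch of the lifecycle half of that pattern, assuming a flag class exposing a static isEnabled():

    // Guard a lifecycle-managed service behind a feature flag; the guard must be
    // applied symmetrically, or offMaster() would stop a service that never started.
    public void onMaster() {
        if (AdaptiveAllocationsFeatureFlag.isEnabled()) {
            adaptiveAllocationsScalerService.start();
        }
    }

    public void offMaster() {
        if (AdaptiveAllocationsFeatureFlag.isEnabled()) {
            adaptiveAllocationsScalerService.stop();
        }
    }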
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
index 12eeaf8732235..e0433ea6fdd71 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
@@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry {
      * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring
      * mappings that point to the corresponding ECS fields.
      */
-    public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 17;
+    public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 18;
     private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version";
     private static final String STACK_TEMPLATE_VERSION = "8";
     private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version";
diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/10_analyze.yml b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/10_analyze.yml
index e5babad76eb05..bcee1691e033c 100644
--- a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/10_analyze.yml
+++ b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/10_analyze.yml
@@ -175,6 +175,6 @@ setup:
 
   - match: { status: 500 }
   - match: { error.type: repository_verification_exception }
-  - match: { error.reason: "/.*test_repo_slow..analysis.failed.*/" }
+  - match: { error.reason: "/.*test_repo_slow..Repository.analysis.timed.out.*/" }
   - match: { error.root_cause.0.type: repository_verification_exception }
   - match: { error.root_cause.0.reason: "/.*test_repo_slow..analysis.timed.out.after..1s.*/" }
diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java
index 7715b9e8d42b8..2ca5685c83db3 100644
--- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java
+++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.metadata.RepositoryMetadata;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.ReferenceDocs;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
@@ -363,6 +364,17 @@ public BytesReference onContendedCompareAndExchange(BytesRegister register, BytesReference update) {
         }
     }
 
+    private static void assertAnalysisFailureMessage(String message) {
+        assertThat(
+            message,
+            allOf(
+                containsString("Elasticsearch observed the storage system underneath this repository behaved incorrectly"),
+                containsString("not suitable for use with Elasticsearch snapshots"),
+                containsString(ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS.toString())
+            )
+        );
+    }
+
Elasticsearch snapshots"), + containsString(ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS.toString()) + ) + ); + } + public void testTimesOutSpinningRegisterAnalysis() { final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo"); request.timeout(TimeValue.timeValueMillis(between(1, 1000))); @@ -375,7 +387,13 @@ public boolean compareAndExchangeReturnsWitness(String key) { } }); final var exception = expectThrows(RepositoryVerificationException.class, () -> analyseRepository(request)); - assertThat(exception.getMessage(), containsString("analysis failed")); + assertThat( + exception.getMessage(), + allOf( + containsString("Repository analysis timed out. Consider specifying a longer timeout"), + containsString(ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS.toString()) + ) + ); assertThat( asInstanceOf(RepositoryVerificationException.class, exception.getCause()).getMessage(), containsString("analysis timed out") @@ -391,7 +409,7 @@ public boolean compareAndExchangeReturnsWitness(String key) { } }); final var exception = expectThrows(RepositoryVerificationException.class, () -> analyseRepository(request)); - assertThat(exception.getMessage(), containsString("analysis failed")); + assertAnalysisFailureMessage(exception.getMessage()); assertThat( asInstanceOf(RepositoryVerificationException.class, ExceptionsHelper.unwrapCause(exception.getCause())).getMessage(), allOf(containsString("uncontended register operation failed"), containsString("did not observe any value")) @@ -407,7 +425,7 @@ public boolean acceptsEmptyRegister() { } }); final var exception = expectThrows(RepositoryVerificationException.class, () -> analyseRepository(request)); - assertThat(exception.getMessage(), containsString("analysis failed")); + assertAnalysisFailureMessage(exception.getMessage()); final var cause = ExceptionsHelper.unwrapCause(exception.getCause()); if (cause instanceof IOException ioException) { assertThat(ioException.getMessage(), containsString("empty register update rejected")); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java index 7b82b69a682fa..494d1d3fedcd9 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; @@ -387,6 +388,9 @@ public static class AsyncAction { private final List responses; private final RepositoryPerformanceSummary.Builder summary = new RepositoryPerformanceSummary.Builder(); + private final RepositoryVerificationException analysisCancelledException; + private final RepositoryVerificationException analysisTimedOutException; + public AsyncAction( TransportService transportService, BlobStoreRepository repository, @@ -410,6 +414,12 @@ public AsyncAction( this.listener = ActionListener.runBefore(listener, () -> cancellationListener.onResponse(null)); 
responses = new ArrayList<>(request.blobCount); + + this.analysisCancelledException = new RepositoryVerificationException(request.repositoryName, "analysis cancelled"); + this.analysisTimedOutException = new RepositoryVerificationException( + request.repositoryName, + "analysis timed out after [" + request.getTimeout() + "]" + ); } private boolean setFirstFailure(Exception e) { @@ -453,12 +463,7 @@ public void onFailure(Exception e) { assert e instanceof ElasticsearchTimeoutException : e; if (isRunning()) { // if this CAS fails then we're already failing for some other reason, nbd - setFirstFailure( - new RepositoryVerificationException( - request.repositoryName, - "analysis timed out after [" + request.getTimeout() + "]" - ) - ); + setFirstFailure(analysisTimedOutException); } } } @@ -472,7 +477,7 @@ public void run() { cancellationListener.addTimeout(request.getTimeout(), repository.threadPool(), EsExecutors.DIRECT_EXECUTOR_SERVICE); cancellationListener.addListener(new CheckForCancelListener()); - task.addListener(() -> setFirstFailure(new RepositoryVerificationException(request.repositoryName, "analysis cancelled"))); + task.addListener(() -> setFirstFailure(analysisCancelledException)); final Random random = new Random(request.getSeed()); final List nodes = getSnapshotNodes(discoveryNodes); @@ -873,13 +878,20 @@ private void sendResponse(final long listingStartTimeNanos, final long deleteSta ); } else { logger.debug(() -> "analysis of repository [" + request.repositoryName + "] failed", exception); - listener.onFailure( - new RepositoryVerificationException( - request.getRepositoryName(), - "analysis failed, you may need to manually remove [" + blobPath + "]", - exception - ) - ); + + final String failureDetail; + if (exception == analysisCancelledException) { + failureDetail = "Repository analysis was cancelled."; + } else if (exception == analysisTimedOutException) { + failureDetail = Strings.format(""" + Repository analysis timed out. Consider specifying a longer timeout using the [?timeout] request parameter. See \ + [%s] for more information.""", ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS); + } else { + failureDetail = repository.getAnalysisFailureExtraDetail(); + } + listener.onFailure(new RepositoryVerificationException(request.getRepositoryName(), Strings.format(""" + %s Elasticsearch attempted to remove the data it wrote at [%s] but may have left some behind. If so, \ + please now remove this data manually.""", failureDetail, blobPath), exception)); } } } diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index aa1e8858163a5..648146ccdcc61 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -47,7 +47,7 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { // The stack template registry version. This number must be incremented when we make changes // to built-in templates. 
- public static final int REGISTRY_VERSION = 11; + public static final int REGISTRY_VERSION = 12; public static final String TEMPLATE_VERSION_VARIABLE = "xpack.stack.template.version"; public static final Setting STACK_TEMPLATES_ENABLED = Setting.boolSetting( diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java index 3412be813dcf6..23bab56de5ec9 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.transform.transforms.common; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; @@ -45,6 +47,7 @@ * Basic abstract class for implementing a transform function that utilizes composite aggregations */ public abstract class AbstractCompositeAggFunction implements Function { + private static final Logger logger = LogManager.getLogger(AbstractCompositeAggFunction.class); public static final int TEST_QUERY_PAGE_SIZE = 50; public static final String COMPOSITE_AGGREGATION_NAME = "_transform"; @@ -78,7 +81,7 @@ public void preview( ClientHelper.TRANSFORM_ORIGIN, client, TransportSearchAction.TYPE, - buildSearchRequest(sourceConfig, timeout, numberOfBuckets), + buildSearchRequestForValidation("preview", sourceConfig, timeout, numberOfBuckets), ActionListener.wrap(r -> { try { final InternalAggregations aggregations = r.getAggregations(); @@ -116,7 +119,7 @@ public void validateQuery( TimeValue timeout, ActionListener listener ) { - SearchRequest searchRequest = buildSearchRequest(sourceConfig, timeout, TEST_QUERY_PAGE_SIZE); + SearchRequest searchRequest = buildSearchRequestForValidation("validate", sourceConfig, timeout, TEST_QUERY_PAGE_SIZE); ClientHelper.executeWithHeadersAsync( headers, ClientHelper.TRANSFORM_ORIGIN, @@ -193,11 +196,12 @@ protected abstract Stream> extractResults( TransformProgress progress ); - private SearchRequest buildSearchRequest(SourceConfig sourceConfig, TimeValue timeout, int pageSize) { + private SearchRequest buildSearchRequestForValidation(String logId, SourceConfig sourceConfig, TimeValue timeout, int pageSize) { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(sourceConfig.getQueryConfig().getQuery()) .runtimeMappings(sourceConfig.getRuntimeMappings()) .timeout(timeout); buildSearchQuery(sourceBuilder, null, pageSize); + logger.debug("[{}] Querying {} for data: {}", logId, sourceConfig.getIndex(), sourceBuilder); return new SearchRequest(sourceConfig.getIndex()).source(sourceBuilder).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); }
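
The following standalone sketch is not part of the diff; it illustrates the sentinel-exception pattern that the RepositoryAnalyzeAction change above relies on. Cancellation and timeout exceptions are pre-built once per run, and the final failure handler identifies them by reference comparison (==) rather than by parsing messages, so it can pick a tailored user-facing detail. All names in the sketch (SentinelFailureDemo, failureDetail, and so on) are hypothetical.

public class SentinelFailureDemo {

    // Pre-built "sentinel" exceptions, created once per analysis run. Because the
    // same instances are always passed to setFirstFailure(...), the handler can
    // later tell *why* the run failed with a plain identity check.
    final RuntimeException cancelledException = new RuntimeException("analysis cancelled");
    final RuntimeException timedOutException = new RuntimeException("analysis timed out");

    private Exception firstFailure;

    // Records only the first failure, mirroring the CAS-guarded setFirstFailure in the PR.
    synchronized boolean setFirstFailure(Exception e) {
        if (firstFailure == null) {
            firstFailure = e;
            return true;
        }
        return false;
    }

    // Chooses a user-facing detail by identity, not by exception type or message text.
    synchronized String failureDetail() {
        if (firstFailure == cancelledException) {
            return "Repository analysis was cancelled.";
        } else if (firstFailure == timedOutException) {
            return "Repository analysis timed out. Consider specifying a longer timeout.";
        } else {
            return "The storage system behaved unexpectedly.";
        }
    }

    public static void main(String[] args) {
        SentinelFailureDemo demo = new SentinelFailureDemo();
        demo.setFirstFailure(demo.timedOutException);
        // Only the first failure wins; this second call is a no-op.
        demo.setFirstFailure(demo.cancelledException);
        System.out.println(demo.failureDetail());
    }
}

Comparing by identity sidesteps fragile message matching and cleanly separates the two self-inflicted failures (cancellation, timeout) from genuine storage misbehaviour, which falls through to the generic detail.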
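
The new failure messages are built with Java text blocks whose lines end in a backslash. This small sketch, also not part of the diff, shows how that syntax behaves: the trailing backslash suppresses the newline so two source lines render as one sentence, and %s placeholders are filled by format(...). The diff uses Elasticsearch's Strings.format; plain String.format is substituted here to keep the sketch self-contained, and the sample values are invented.

public class TextBlockDemo {
    public static void main(String[] args) {
        String failureDetail = "Repository analysis timed out.";
        String blobPath = "temp-analysis-path";
        // The trailing backslash inside the text block is a line-continuation
        // escape (Java 15+), so no newline appears between the two lines.
        String message = String.format("""
            %s Elasticsearch attempted to remove the data it wrote at [%s] but may have left some behind. If so, \
            please now remove this data manually.""", failureDetail, blobPath);
        System.out.println(message);
    }
}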
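
Finally, a note on the registry version bumps in MonitoringTemplateRegistry and StackTemplateRegistry. The literal 8_00_00_99 uses Java's underscore digit separators: it is simply the int 8000099, with the grouping appearing to encode "version 8.0.0, offset 99" by convention, so the monitoring bump from + 17 to + 18 increments the effective version by one. A trivial sketch (not from the diff):

public class VersionLiteralDemo {
    public static void main(String[] args) {
        int base = 8_00_00_99;           // underscores are ignored: 8000099
        int registryVersion = base + 18; // the bumped value: 8000117
        System.out.println(registryVersion);
    }
}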