diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java index 162895fd486cf..d0c7e9316d996 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java @@ -26,7 +26,53 @@ public abstract class OracleOpenJdkToolchainResolver extends AbstractCustomJavaToolchainResolver { - record JdkBuild(JavaLanguageVersion languageVersion, String version, String buildNumber, String hash) {} + interface JdkBuild { + JavaLanguageVersion languageVersion(); + + String url(String os, String arch, String extension); + } + + record ReleasedJdkBuild(JavaLanguageVersion languageVersion, String version, String buildNumber, String hash) implements JdkBuild { + + @Override + public String url(String os, String arch, String extension) { + return "https://download.oracle.com/java/GA/jdk" + + version + + "/" + + hash + + "/" + + buildNumber + + "/GPL/openjdk-" + + version + + "_" + + os + + "-" + + arch + + "_bin." + + extension; + } + } + + record EarlyAccessJdkBuild(JavaLanguageVersion languageVersion, String version, String buildNumber) implements JdkBuild { + + @Override + public String url(String os, String arch, String extension) { + return "https://download.java.net/java/early_access/jdk" + + version + + "/" + + version + + "/GPL/openjdk-" + + version + + "-ea+" + + buildNumber + + "_" + + os + + "-" + + arch + + "_bin." + + extension; + } + } private static final Pattern VERSION_PATTERN = Pattern.compile( "(\\d+)(\\.\\d+\\.\\d+(?:\\.\\d+)?)?\\+(\\d+(?:\\.\\d+)?)(@([a-f0-9]{32}))?" @@ -39,7 +85,11 @@ record JdkBuild(JavaLanguageVersion languageVersion, String version, String buil ); // package private so it can be replaced by tests - List<JdkBuild> builds = List.of(getBundledJdkBuild()); + List<JdkBuild> builds = List.of( + getBundledJdkBuild(), + // 23 early access + new EarlyAccessJdkBuild(JavaLanguageVersion.of(23), "23", "23") + ); private JdkBuild getBundledJdkBuild() { String bundledJdkVersion = VersionProperties.getBundledJdkVersion(); @@ -51,7 +101,7 @@ private JdkBuild getBundledJdkBuild() { String baseVersion = jdkVersionMatcher.group(1) + (jdkVersionMatcher.group(2) != null ? (jdkVersionMatcher.group(2)) : ""); String build = jdkVersionMatcher.group(3); String hash = jdkVersionMatcher.group(5); - return new JdkBuild(bundledJdkMajorVersion, baseVersion, build, hash); + return new ReleasedJdkBuild(bundledJdkMajorVersion, baseVersion, build, hash); } /** @@ -68,24 +118,7 @@ public Optional<JavaToolchainDownload> resolve(JavaToolchainRequest request) { String extension = operatingSystem.equals(OperatingSystem.WINDOWS) ? "zip" : "tar.gz"; String arch = toArchString(request.getBuildPlatform().getArchitecture()); String os = toOsString(operatingSystem); - return Optional.of( - () -> URI.create( - "https://download.oracle.com/java/GA/jdk" - + build.version - + "/" - + build.hash - + "/" - + build.buildNumber - + "/GPL/openjdk-" - + build.version - + "_" - + os - + "-" - + arch - + "_bin." 
- + extension - ) - ); + return Optional.of(() -> URI.create(build.url(os, arch, extension))); } /** @@ -113,7 +146,7 @@ private JdkBuild findSupportedBuild(JavaToolchainRequest request) { JavaLanguageVersion languageVersion = javaToolchainSpec.getLanguageVersion().get(); for (JdkBuild build : builds) { - if (build.languageVersion.equals(languageVersion)) { + if (build.languageVersion().equals(languageVersion)) { return build; } } diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy index b076baa94c2fb..82bcbca3785d6 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy @@ -25,7 +25,8 @@ class OracleOpenJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { } } toolChain.builds = [ - new OracleOpenJdkToolchainResolver.JdkBuild(JavaLanguageVersion.of(20), "20", "36", "bdc68b4b9cbc4ebcb30745c85038d91d") + new OracleOpenJdkToolchainResolver.ReleasedJdkBuild(JavaLanguageVersion.of(20), "20", "36", "bdc68b4b9cbc4ebcb30745c85038d91d"), + new OracleOpenJdkToolchainResolver.EarlyAccessJdkBuild(JavaLanguageVersion.of(21), "21", "6") ] toolChain } @@ -40,7 +41,18 @@ class OracleOpenJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { [20, anyVendor(), MAC_OS, AARCH64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_macos-aarch64_bin.tar.gz"], [20, anyVendor(), LINUX, X86_64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-x64_bin.tar.gz"], [20, anyVendor(), LINUX, AARCH64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-aarch64_bin.tar.gz"], - [20, anyVendor(), WINDOWS, X86_64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip"] + [20, anyVendor(), WINDOWS, X86_64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip"], + // https://download.java.net/java/early_access/jdk23/23/GPL/openjdk-23-ea+23_macos-aarch64_bin.tar.gz + [21, ORACLE, MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-x64_bin.tar.gz"], + [21, ORACLE, MAC_OS, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-aarch64_bin.tar.gz"], + [21, ORACLE, LINUX, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-x64_bin.tar.gz"], + [21, ORACLE, LINUX, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-aarch64_bin.tar.gz"], + [21, ORACLE, WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_windows-x64_bin.zip"], + [21, anyVendor(), MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-x64_bin.tar.gz"], + [21, anyVendor(), MAC_OS, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-aarch64_bin.tar.gz"], + [21, anyVendor(), LINUX, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-x64_bin.tar.gz"], + [21, anyVendor(), LINUX, AARCH64, 
"https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-aarch64_bin.tar.gz"], + [21, anyVendor(), WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_windows-x64_bin.zip"] ] } diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index e55e8ec39654e..c0d2dc0bdb5c7 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -23,8 +23,9 @@ dependencies { compileOnly project(":libs:elasticsearch-cli") implementation project(":libs:elasticsearch-plugin-api") implementation project(":libs:elasticsearch-plugin-scanner") - implementation 'org.ow2.asm:asm:9.6' - implementation 'org.ow2.asm:asm-tree:9.6' + // TODO: asm is picked up from the plugin scanner, we should consolidate so it is not defined twice + implementation 'org.ow2.asm:asm:9.7' + implementation 'org.ow2.asm:asm-tree:9.7' api "org.bouncycastle:bcpg-fips:1.0.7.1" api "org.bouncycastle:bc-fips:1.0.2.4" diff --git a/docs/changelog/108306.yaml b/docs/changelog/108306.yaml new file mode 100644 index 0000000000000..7a104ce880f43 --- /dev/null +++ b/docs/changelog/108306.yaml @@ -0,0 +1,5 @@ +pr: 108306 +summary: Enable inter-segment concurrency for low cardinality numeric terms aggs +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/108679.yaml b/docs/changelog/108679.yaml new file mode 100644 index 0000000000000..62cd82a52c5bb --- /dev/null +++ b/docs/changelog/108679.yaml @@ -0,0 +1,6 @@ +pr: 108679 +summary: Suppress deprecation warnings from ingest pipelines when deleting trained model +area: Machine Learning +type: bug +issues: + - 105004 diff --git a/docs/changelog/108693.yaml b/docs/changelog/108693.yaml new file mode 100644 index 0000000000000..ee701e0f57736 --- /dev/null +++ b/docs/changelog/108693.yaml @@ -0,0 +1,5 @@ +pr: 108693 +summary: Test pipeline run after reroute +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/108736.yaml b/docs/changelog/108736.yaml new file mode 100644 index 0000000000000..41e4084021e00 --- /dev/null +++ b/docs/changelog/108736.yaml @@ -0,0 +1,5 @@ +pr: 108736 +summary: Harden field-caps request dispatcher +area: Search +type: bug +issues: [] diff --git a/docs/changelog/108780.yaml b/docs/changelog/108780.yaml new file mode 100644 index 0000000000000..40e66326e6b9b --- /dev/null +++ b/docs/changelog/108780.yaml @@ -0,0 +1,6 @@ +pr: 108780 +summary: Add `continent_code` support to the geoip processor +area: Ingest Node +type: enhancement +issues: + - 85820 diff --git a/docs/changelog/108786.yaml b/docs/changelog/108786.yaml new file mode 100644 index 0000000000000..1c07a3ceac900 --- /dev/null +++ b/docs/changelog/108786.yaml @@ -0,0 +1,5 @@ +pr: 108786 +summary: Make ingest byte stat names more descriptive +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/108796.yaml b/docs/changelog/108796.yaml new file mode 100644 index 0000000000000..808247cf347d9 --- /dev/null +++ b/docs/changelog/108796.yaml @@ -0,0 +1,5 @@ +pr: 108796 +summary: Return ingest byte stats even when 0-valued +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/108802.yaml b/docs/changelog/108802.yaml new file mode 100644 index 0000000000000..7c28a81a1b353 --- /dev/null +++ b/docs/changelog/108802.yaml @@ -0,0 +1,5 @@ +pr: 108802 +summary: Fix multithreading copies in lib vec +area: Vector Search +type: bug +issues: [] diff --git a/docs/changelog/108820.yaml 
b/docs/changelog/108820.yaml new file mode 100644 index 0000000000000..55045ffce3dfa --- /dev/null +++ b/docs/changelog/108820.yaml @@ -0,0 +1,5 @@ +pr: 108820 +summary: Allow `LuceneSourceOperator` to early terminate +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/108822.yaml b/docs/changelog/108822.yaml new file mode 100644 index 0000000000000..8cec4da5dbc7f --- /dev/null +++ b/docs/changelog/108822.yaml @@ -0,0 +1,6 @@ +pr: 108822 +summary: Update ASM to 9.7 for plugin scanner +area: Infra/Plugins +type: upgrade +issues: + - 108776 diff --git a/docs/reference/cluster/cluster-info.asciidoc b/docs/reference/cluster/cluster-info.asciidoc index 9acac87f9b4e9..7d67f1602aeaa 100644 --- a/docs/reference/cluster/cluster-info.asciidoc +++ b/docs/reference/cluster/cluster-info.asciidoc @@ -207,21 +207,33 @@ pipeline. (integer) Total number of failed operations for the ingest pipeline. -`ingested_in_bytes`:: -(Optional, integer) -Total number of bytes of all documents ingested by the pipeline. -This field is only present on pipelines which are the first to process a document. -Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after -a reroute processor, or pipelines in pipeline processors. - -`produced_in_bytes`:: -(Optional, integer) -Total number of bytes of all documents produced by the pipeline. -This field is only present on pipelines which are the first to process a document. -Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after -a reroute processor, or pipelines in pipeline processors. -In situations where there are subsequent pipelines, the value represents the size of the document after all pipelines -have run. +`ingested_as_first_pipeline`:: +(<>) +Total ingested size of all documents which were first processed by this pipeline. +A document is not added to the stat value for this pipeline if it is a final pipeline after a default pipeline, a pipeline +run after a reroute processor, or is within a pipeline processor. +Instead, the document size is added to the stat value of the pipeline which initially ingested the document. + +`ingested_as_first_pipeline_in_bytes`:: +(integer) +Total ingested size, in bytes, of all documents which were first processed by this pipeline. +A document is not added to the stat value for this pipeline if it is a final pipeline after a default pipeline, a pipeline +run after a reroute processor, or is within a pipeline processor. +Instead, the document size is added to the stat value of the pipeline which initially ingested the document. + +`produced_as_first_pipeline`:: +(<>) +Total produced size of all documents which were first processed by this pipeline. +A document is not added to the stat value for this pipeline if it is a final pipeline after a default pipeline, a pipeline +run after a reroute processor, or is within a pipeline processor. +Instead, the document size is added to the stat value of the pipeline which initially ingested the document. + +`produced_as_first_pipeline_in_bytes`:: +(integer) +Total produced size, in bytes, of all documents which were first processed by this pipeline. +A document is not added to the stat value for this pipeline if it is a final pipeline after a default pipeline, a pipeline +run after a reroute processor, or is within a pipeline processor. +Instead, the document size is added to the stat value of the pipeline which initially ingested the document. 
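(Editor's illustration, not part of the change: a minimal sketch of the attribution rule described above, with hypothetical names rather than the actual IngestService code. Byte counters move only on the pipeline that first sees a document; final pipelines after a default pipeline, pipelines run after a reroute processor, and pipelines invoked from a pipeline processor leave them untouched.)

[source,java]
----
// Sketch: ingest byte stats are credited only to the first pipeline in the chain.
class PipelineByteStats {
    long ingestedAsFirstPipelineBytes;
    long producedAsFirstPipelineBytes;

    // firstPipeline is false for a final pipeline run after a default pipeline,
    // a pipeline run after a reroute processor, or a pipeline processor invocation.
    void onIngest(long docSizeInBytes, boolean firstPipeline) {
        if (firstPipeline) {
            ingestedAsFirstPipelineBytes += docSizeInBytes;
        }
    }

    // Called with the document size after all pipelines have run; the produced
    // size is credited to the pipeline that initially ingested the document.
    void onProduced(long docSizeInBytes, boolean firstPipeline) {
        if (firstPipeline) {
            producedAsFirstPipelineBytes += docSizeInBytes;
        }
    }
}
----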
`processors`:: (array of objects) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 46656373519d0..59cb7167028c8 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -2643,21 +2643,33 @@ pipeline. (integer) Total number of failed operations for the ingest pipeline. -`ingested_in_bytes`:: -(Optional, integer) -Total number of bytes of all documents ingested by the pipeline. -This field is only present on pipelines which are the first to process a document. -Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after -a reroute processor, or pipelines in pipeline processors. - -`produced_in_bytes`:: -(Optional, integer) -Total number of bytes of all documents produced by the pipeline. -This field is only present on pipelines which are the first to process a document. -Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after -a reroute processor, or pipelines in pipeline processors. -In situations where there are subsequent pipelines, the value represents the size of the document after all pipelines -have run. +`ingested_as_first_pipeline`:: +(<>) +Total ingested size of all documents which were first processed by this pipeline. +A document is not added to the stat value for this pipeline if it is a final pipeline after a default pipeline, a pipeline +run after a reroute processor, or is within a pipeline processor. +Instead, the document size is added to the stat value of the pipeline which initially ingested the document. + +`ingested_as_first_pipeline_in_bytes`:: +(integer) +Total ingested size, in bytes, of all documents which were first processed by this pipeline. +A document is not added to the stat value for this pipeline if it is a final pipeline after a default pipeline, a pipeline +run after a reroute processor, or is within a pipeline processor. +Instead, the document size is added to the stat value of the pipeline which initially ingested the document. + +`produced_as_first_pipeline`:: +(<>) +Total produced size of all documents which were first processed by this pipeline. +A document is not added to the stat value for this pipeline if it is a final pipeline after a default pipeline, a pipeline +run after a reroute processor, or is within a pipeline processor. +Instead, the document size is added to the stat value of the pipeline which initially ingested the document. + +`produced_as_first_pipeline_in_bytes`:: +(integer) +Total produced size, in bytes, of all documents which were first processed by this pipeline. +A document is not added to the stat value for this pipeline if it is a final pipeline after a default pipeline, a pipeline +run after a reroute processor, or is within a pipeline processor. +Instead, the document size is added to the stat value of the pipeline which initially ingested the document. `processors`:: (array of objects) diff --git a/docs/reference/ilm/actions/ilm-rollover.asciidoc b/docs/reference/ilm/actions/ilm-rollover.asciidoc index 4731986bd2559..3a60d689b4c0f 100644 --- a/docs/reference/ilm/actions/ilm-rollover.asciidoc +++ b/docs/reference/ilm/actions/ilm-rollover.asciidoc @@ -7,6 +7,13 @@ Phases allowed: hot. Rolls over a target to a new index when the existing index satisfies the specified rollover conditions. 
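(Editor's illustration, not part of the change: the NOTE and Options text that follow describe the decision rule, namely roll over when any `max_*` condition and all `min_*` conditions hold, skipping empty indices. A minimal sketch under those assumptions, with hypothetical types rather than the actual rollover action code.)

[source,java]
----
import java.util.List;
import java.util.function.Predicate;

// Sketch: roll over when ANY max_* condition is satisfied, ALL min_* conditions
// are satisfied, and the index is non-empty (empty indices are skipped by default).
record RolloverConditions(List<Predicate<IndexStats>> maxConditions,
                          List<Predicate<IndexStats>> minConditions) {

    boolean shouldRollover(IndexStats stats) {
        return stats.docCount() > 0
            && maxConditions.stream().anyMatch(c -> c.test(stats))
            && minConditions.stream().allMatch(c -> c.test(stats));
    }
}

// Hypothetical snapshot of the current write index.
record IndexStats(long docCount, long sizeInBytes, long ageMillis) {}
----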
+[NOTE] +==== +When an index is rolled over, the previous index's age is updated to reflect the rollover time. +This date, rather than the index's `creation_date`, is used in {ilm} +`min_age` phase calculations. <>. +==== + IMPORTANT: If the rollover action is used on a <>, policy execution waits until the leader index rolls over (or is <>), @@ -46,11 +53,11 @@ PUT my-index-000001 [[ilm-rollover-options]] ==== Options -A rollover action must specify at least one max_* condition, it may include zero -or more min_* conditions. An empty rollover action is invalid. +A rollover action must specify at least one `max_*` condition; it may include zero +or more `min_*` conditions. An empty rollover action is invalid. -The index will rollover once any max_* condition is satisfied and all -min_* conditions are satisfied. Note, however, that empty indices are not rolled +The index will roll over once any `max_*` condition is satisfied and all +`min_*` conditions are satisfied. Note, however, that empty indices are not rolled over by default. // tag::rollover-conditions[] @@ -256,7 +263,7 @@ PUT _ilm/policy/my_policy ===== Roll over using multiple conditions When you specify multiple rollover conditions, -the index is rolled over when _any_ of the max_* and _all_ of the min_* conditions are met. +the index is rolled over when _any_ of the `max_*` and _all_ of the `min_*` conditions are met. This example rolls the index over if it is at least 7 days old or at least 100 gigabytes, but only as long as the index contains at least 1000 documents. diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index ad974c6f1c2ed..d922fa6687823 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -154,11 +154,12 @@ You can use the <> to monitor the === Common {ilm-init} setting issues [discrete] +[[min-age-calculation]] ==== How `min_age` is calculated When setting up an <> or <>, be aware that `min_age` can be relative to either the rollover time or the index creation time. -If you use <>, `min_age` is calculated relative to the time the index was rolled over. This is because the <> generates a new index. The `creation_date` of the new index (retrievable via <>) is used in the calculation. If you do not use rollover in the {ilm-init} policy, `min_age` is calculated relative to the `creation_date` of the original index. +If you use <>, `min_age` is calculated relative to the time the index was rolled over. This is because the <> generates a new index and updates the `age` of the previous index to reflect the rollover time. If the index hasn't been rolled over, then the `age` is the same as the `creation_date` for the index. You can override how `min_age` is calculated using the `index.lifecycle.origination_date` and `index.lifecycle.parse_origination_date` <>. diff --git a/docs/reference/ilm/ilm-index-lifecycle.asciidoc b/docs/reference/ilm/ilm-index-lifecycle.asciidoc index 80b5c65504214..acf59645dae13 100644 --- a/docs/reference/ilm/ilm-index-lifecycle.asciidoc +++ b/docs/reference/ilm/ilm-index-lifecycle.asciidoc @@ -43,6 +43,12 @@ a "cold" phase with a minimum age either unset, or >= 10 days. The minimum age defaults to zero, which causes {ilm-init} to move indices to the next phase as soon as all actions in the current phase complete. +[NOTE] +==== +If an index has been <>, then the `min_age` value is relative to the time +the index was rolled over, not the index creation time. <>. 
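(Worked example: an index created on January 1 and rolled over on January 31 with a subsequent phase `min_age` of 14 days enters that phase on February 14, fourteen days after the rollover rather than after the creation date.)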
+==== + If an index has unallocated shards and the <> is yellow, the index can still transition to the next phase according to its {ilm} policy. However, because {es} can only perform certain clean up tasks on a green diff --git a/docs/reference/ilm/ilm-tutorial.asciidoc b/docs/reference/ilm/ilm-tutorial.asciidoc index 36d89b7df6f21..53e8f7d8c7d04 100644 --- a/docs/reference/ilm/ilm-tutorial.asciidoc +++ b/docs/reference/ilm/ilm-tutorial.asciidoc @@ -57,7 +57,7 @@ reaches either a `max_primary_shard_size` of 50 gigabytes or a `max_age` of 30 d [NOTE] ==== -The `min_age` value is relative to the rollover time, not the index creation time. +The `min_age` value is relative to the rollover time, not the index creation time. <>. ==== You can create the policy through {kib} or with the diff --git a/docs/reference/ilm/index-rollover.asciidoc b/docs/reference/ilm/index-rollover.asciidoc index 5e6c4b89ba99f..231fb81e59fc4 100644 --- a/docs/reference/ilm/index-rollover.asciidoc +++ b/docs/reference/ilm/index-rollover.asciidoc @@ -3,8 +3,7 @@ When indexing time series data like logs or metrics, you can't write to a single index indefinitely. To meet your indexing and search performance requirements and manage resource usage, -you write to an index until some threshold is met and -then create a new index and start writing to it instead. +you write to an index until some threshold is met and then create a new index and start writing to it instead. Using rolling indices enables you to: * Optimize the active index for high ingest rates on high-performance _hot_ nodes. @@ -35,8 +34,15 @@ more configuration steps and concepts: You optimize this configuration for ingestion, typically using as many shards as you have hot nodes. * An _index alias_ that references the entire set of indices. * A single index designated as the _write index_. -This is the active index that handles all write requests. -On each rollover, the new index becomes the write index. +This is the active index that handles all write requests. +On each rollover, the new index becomes the write index. + +[NOTE] +==== +When an index is rolled over, the previous index's age is updated to reflect the rollover time. +This date, rather than the index's `creation_date`, is used in {ilm} +`min_age` phase calculations. <>. +==== [discrete] [[ilm-automatic-rollover]] diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index a8c6a8f647c74..e85165a28dcb4 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -48,11 +48,11 @@ field instead. *Depends on what is available in `database_file`: * If a GeoLite2 City or GeoIP2 City database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, +`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, and `location`. The fields actually added depend on what has been found and which properties were configured in `properties`. * If a GeoLite2 Country or GeoIP2 Country database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name` and `continent_name`. The fields actually added depend on what has been found and which properties -were configured in `properties`. 
+`country_iso_code`, `country_name`, `continent_code`, and `continent_name`. The fields actually added depend on what has been found +and which properties were configured in `properties`. * If the GeoLite2 ASN database is used, then the following fields may be added under the `target_field`: `ip`, `asn`, `organization_name` and `network`. The fields actually added depend on what has been found and which properties were configured in `properties`. @@ -67,10 +67,10 @@ The fields actually added depend on what has been found and which properties wer `organization_name`, `network`, `isp`, `isp_organization`, `mobile_country_code`, and `mobile_network_code`. The fields actually added depend on what has been found and which properties were configured in `properties`. * If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, `location`, `asn`, -`organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, `residential_proxy`, -`domain`, `isp`, `isp_organization`, `mobile_country_code`, `mobile_network_code`, `user_type`, and `connection_type`. The fields -actually added depend on what has been found and which properties were configured in `properties`. +`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, +`location`, `asn`, `organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, +`residential_proxy`, `domain`, `isp`, `isp_organization`, `mobile_country_code`, `mobile_network_code`, `user_type`, and +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. Here is an example that uses the default city database and adds the geographical information to the `geoip` field based on the `ip` field: diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 3716b4b346209..fa52722c4c7d1 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -352,7 +352,7 @@ Will become: // TEST[s/^/{"_source":/ s/\n$/}/] [[range-synthetic-source-inclusive]] -Range field vales are always represented as inclusive on both sides with bounds adjusted accordingly. For example: +Range field values are always represented as inclusive on both sides with bounds adjusted accordingly. Default values for range bounds are represented as `null`. This is true even if a range bound was explicitly provided. For example: [source,console,id=synthetic-source-range-normalization-example] ---- PUT idx @@ -388,6 +388,42 @@ Will become: ---- // TEST[s/^/{"_source":/ s/\n$/}/] +[[range-synthetic-source-default-bounds]] +Default values for range bounds are represented as `null` in synthetic source. This is true even if a range bound was explicitly provided with its default value. 
For example: +[source,console,id=synthetic-source-range-bounds-example] +---- +PUT idx +{ + "mappings": { + "_source": { "mode": "synthetic" }, + "properties": { + "my_range": { "type": "integer_range" } + } + } +} + +PUT idx/_doc/1 +{ + "my_range": { + "lte": 2147483647 + } +} +---- +// TEST[s/$/\nGET idx\/_doc\/1?filter_path=_source\n/] + +Will become: + +[source,console-result] +---- +{ + "my_range": { + "gte": null, + "lte": null + } +} +---- +// TEST[s/^/{"_source":/ s/\n$/}/] + `date` ranges are formatted using provided `format` or by default using `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format. For example: [source,console,id=synthetic-source-range-date-example] ---- diff --git a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc index 50754ac554439..f1b3fffb8a9a2 100644 --- a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc @@ -25,9 +25,9 @@ Currently only `pytorch` models are supported for deployment. Once deployed the model can be used by the <> in an ingest pipeline or directly in the <> API. -A model can be deployed multiple times by using deployment IDs. A deployment ID -must be unique and should not match any other deployment ID or model ID, unless -it is the same as the ID of the model being deployed. If `deployment_id` is not +A model can be deployed multiple times by using deployment IDs. A deployment ID +must be unique and should not match any other deployment ID or model ID, unless +it is the same as the ID of the model being deployed. If `deployment_id` is not set, it defaults to the `model_id`. Scaling inference performance can be achieved by setting the parameters @@ -61,7 +61,7 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=model-id] `cache_size`:: (Optional, <>) The inference cache size (in memory outside the JVM heap) per node for the -model. The default value is the size of the model as reported by the +model. In serverless, the cache is disabled by default. Otherwise, the default value is the size of the model as reported by the `model_size_bytes` field in the <>. To disable the cache, `0b` can be provided. @@ -165,8 +165,8 @@ [[start-trained-model-deployment-deployment-id-example]] === Using deployment IDs -The following example starts a new deployment for the `my_model` trained model -with the ID `my_model_for_ingest`. The deployment ID an be used in {infer} API +The following example starts a new deployment for the `my_model` trained model +with the ID `my_model_for_ingest`. The deployment ID can be used in {infer} API calls or in {infer} processors. 
[source,console] -------------------------------------------------- POST _ml/trained_models/my_model/deployment/_start?deployment_id=my_model_for_ingest -------------------------------------------------- @@ -181,4 +181,4 @@ The `my_model` trained model can be deployed again with a different ID: -------------------------------------------------- POST _ml/trained_models/my_model/deployment/_start?deployment_id=my_model_for_search -------------------------------------------------- -// TEST[skip:TBD] \ No newline at end of file +// TEST[skip:TBD] diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 53db6f13a31b3..41c3bafde5e33 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -4037,6 +4037,11 @@ + + + + + @@ -4107,6 +4112,11 @@ + + + + + diff --git a/libs/native/jna/src/main/java/module-info.java b/libs/native/jna/src/main/java/module-info.java index 5c777170d2b56..1b95ccc7cdda0 100644 --- a/libs/native/jna/src/main/java/module-info.java +++ b/libs/native/jna/src/main/java/module-info.java @@ -15,5 +15,7 @@ requires org.elasticsearch.logging; requires com.sun.jna; + exports org.elasticsearch.nativeaccess.jna to com.sun.jna; + provides NativeLibraryProvider with JnaNativeLibraryProvider; } diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java index bec9e75bdc2ce..f7a67a2bf5162 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java @@ -10,13 +10,43 @@ import com.sun.jna.Library; import com.sun.jna.Native; +import com.sun.jna.NativeLong; +import com.sun.jna.Structure; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; +import java.util.Arrays; +import java.util.List; + class JnaPosixCLibrary implements PosixCLibrary { + /** corresponds to struct rlimit */ + public static final class JnaRLimit extends Structure implements Structure.ByReference, RLimit { + public NativeLong rlim_cur = new NativeLong(0); + public NativeLong rlim_max = new NativeLong(0); + + @Override + protected List<String> getFieldOrder() { + return Arrays.asList("rlim_cur", "rlim_max"); + } + + @Override + public long rlim_cur() { + return rlim_cur.longValue(); + } + + @Override + public long rlim_max() { + return rlim_max.longValue(); + } + } + private interface NativeFunctions extends Library { int geteuid(); + + int getrlimit(int resource, JnaRLimit rlimit); + + String strerror(int errno); } private final NativeFunctions functions; @@ -29,4 +59,26 @@ private interface NativeFunctions extends Library { public int geteuid() { return functions.geteuid(); } + + @Override + public RLimit newRLimit() { + return new JnaRLimit(); + } + + @Override + public int getrlimit(int resource, RLimit rlimit) { + assert rlimit instanceof JnaRLimit; + var jnaRlimit = (JnaRLimit) rlimit; + return functions.getrlimit(resource, jnaRlimit); + } + + @Override + public String strerror(int errno) { + return functions.strerror(errno); + } + + @Override + public int errno() { + return Native.getLastError(); + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java index 64f13c12f7735..605c20ec32ff6 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java @@ -16,10 +16,20 @@ class LinuxNativeAccess extends PosixNativeAccess { Systemd systemd; 
LinuxNativeAccess(NativeLibraryProvider libraryProvider) { - super("Linux", libraryProvider); + super("Linux", libraryProvider, new PosixConstants(-1L, 9, 1)); this.systemd = new Systemd(libraryProvider.getLibrary(SystemdLibrary.class)); } + @Override + protected long getMaxThreads() { + // this is only valid on Linux and the value *is* different on OS X + // see /usr/include/sys/resource.h on OS X + // on Linux the resource RLIMIT_NPROC means *the number of threads* + // this is in opposition to BSD-derived OSes + final int rlimit_nproc = 6; + return getRLimit(rlimit_nproc, "max number of threads"); + } + @Override public Systemd systemd() { return systemd; diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java index 9f29ac7668a47..45c2eb595c4eb 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java @@ -13,6 +13,11 @@ class MacNativeAccess extends PosixNativeAccess { MacNativeAccess(NativeLibraryProvider libraryProvider) { - super("MacOS", libraryProvider); + super("MacOS", libraryProvider, new PosixConstants(9223372036854775807L, 5, 1)); + } + + @Override + protected long getMaxThreads() { + return ProcessLimits.UNKNOWN; } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java index 20e143d2e1924..173f60cafaa3e 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java @@ -29,6 +29,11 @@ static NativeAccess instance() { */ boolean definitelyRunningAsRoot(); + /** + * Return limits for the current process. + */ + ProcessLimits getProcessLimits(); + Systemd systemd(); /** diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java index 035c539dbf5c1..f21b6dcffbcaf 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java @@ -25,6 +25,12 @@ public boolean definitelyRunningAsRoot() { return false; } + @Override + public ProcessLimits getProcessLimits() { + logger.warn("Cannot get process limits because native access is not available"); + return new ProcessLimits(ProcessLimits.UNKNOWN, ProcessLimits.UNKNOWN, ProcessLimits.UNKNOWN); + } + @Override public Systemd systemd() { logger.warn("Cannot get systemd access because native access is not available"); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixConstants.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixConstants.java new file mode 100644 index 0000000000000..a2073e170967e --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixConstants.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess; + +/** + * Code constants on POSIX systems. + */ +record PosixConstants(long RLIMIT_INFINITY, int RLIMIT_AS, int RLIMIT_FSIZE) {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java index c390cfc9289c6..932a57b9e5d47 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java @@ -18,11 +18,40 @@ abstract class PosixNativeAccess extends AbstractNativeAccess { protected final PosixCLibrary libc; protected final VectorSimilarityFunctions vectorDistance; + protected final PosixConstants constants; + protected final ProcessLimits processLimits; - PosixNativeAccess(String name, NativeLibraryProvider libraryProvider) { + PosixNativeAccess(String name, NativeLibraryProvider libraryProvider, PosixConstants constants) { super(name, libraryProvider); this.libc = libraryProvider.getLibrary(PosixCLibrary.class); this.vectorDistance = vectorSimilarityFunctionsOrNull(libraryProvider); + this.constants = constants; + this.processLimits = new ProcessLimits( + getMaxThreads(), + getRLimit(constants.RLIMIT_AS(), "max size virtual memory"), + getRLimit(constants.RLIMIT_FSIZE(), "max file size") + ); + } + + /** + * Return the maximum number of threads this process may start, or {@link ProcessLimits#UNKNOWN}. + */ + protected abstract long getMaxThreads(); + + /** + * Return the current rlimit for the given resource. + * If getrlimit fails, returns {@link ProcessLimits#UNKNOWN}. + * If the rlimit is unlimited, returns {@link ProcessLimits#UNLIMITED}. + */ + protected long getRLimit(int resource, String description) { + var rlimit = libc.newRLimit(); + if (libc.getrlimit(resource, rlimit) == 0) { + long value = rlimit.rlim_cur(); + return value == constants.RLIMIT_INFINITY() ? ProcessLimits.UNLIMITED : value; + } else { + logger.warn("unable to retrieve " + description + " [" + libc.strerror(libc.errno()) + "]"); + return ProcessLimits.UNKNOWN; + } } static VectorSimilarityFunctions vectorSimilarityFunctionsOrNull(NativeLibraryProvider libraryProvider) { @@ -39,6 +68,11 @@ public boolean definitelyRunningAsRoot() { return libc.geteuid() == 0; } + @Override + public ProcessLimits getProcessLimits() { + return processLimits; + } + @Override public Optional<VectorSimilarityFunctions> getVectorSimilarityFunctions() { return Optional.ofNullable(vectorDistance); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/ProcessLimits.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/ProcessLimits.java new file mode 100644 index 0000000000000..41d8c57579eed --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/ProcessLimits.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +/** + * Limits for the current process. + * + * @param maxThreads The maximum number of threads that may be created. + * @param maxVirtualMemorySize The maximum size of virtual memory. + * @param maxFileSize The maximum size of a single file. 
+ */ +public record ProcessLimits(long maxThreads, long maxVirtualMemorySize, long maxFileSize) { + public static final long UNKNOWN = -1; + public static final long UNLIMITED = Long.MAX_VALUE; +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java index 387474b62b5f5..f7b5d8e865acc 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java @@ -23,6 +23,11 @@ public boolean definitelyRunningAsRoot() { return false; // don't know } + @Override + public ProcessLimits getProcessLimits() { + return new ProcessLimits(ProcessLimits.UNKNOWN, ProcessLimits.UNKNOWN, ProcessLimits.UNKNOWN); + } + @Override public Optional<VectorSimilarityFunctions> getVectorSimilarityFunctions() { return Optional.empty(); // not supported yet diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java index ecc28c682027a..db259bd09e1cd 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java @@ -20,4 +20,19 @@ public non-sealed interface PosixCLibrary extends NativeLibrary { * @see geteuid */ int geteuid(); + + /** corresponds to struct rlimit */ + interface RLimit { + long rlim_cur(); + + long rlim_max(); + } + + RLimit newRLimit(); + + int getrlimit(int resource, RLimit rlimit); + + String strerror(int errno); + + int errno(); } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java index 45993d6b20e0a..6fb7c70dd1580 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java @@ -12,17 +12,58 @@ import org.elasticsearch.logging.Logger; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; +import java.lang.foreign.Arena; import java.lang.foreign.FunctionDescriptor; +import java.lang.foreign.Linker; +import java.lang.foreign.MemoryLayout; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.StructLayout; import java.lang.invoke.MethodHandle; +import java.lang.invoke.VarHandle; +import static java.lang.foreign.MemoryLayout.PathElement.groupElement; +import static java.lang.foreign.ValueLayout.ADDRESS; import static java.lang.foreign.ValueLayout.JAVA_INT; +import static java.lang.foreign.ValueLayout.JAVA_LONG; import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; +import static org.elasticsearch.nativeaccess.jdk.MemorySegmentUtil.varHandleDropOffset; class JdkPosixCLibrary implements PosixCLibrary { private static final Logger logger = LogManager.getLogger(JdkPosixCLibrary.class); + // errno can change between system calls, so we capture it + private static final StructLayout CAPTURE_ERRNO_LAYOUT = Linker.Option.captureStateLayout(); + static final Linker.Option CAPTURE_ERRNO_OPTION = Linker.Option.captureCallState("errno"); + private static final VarHandle errno$vh = CAPTURE_ERRNO_LAYOUT.varHandle(groupElement("errno")); + private static final MethodHandle geteuid$mh = downcallHandle("geteuid", FunctionDescriptor.of(JAVA_INT)); + private static final MethodHandle 
strerror$mh = downcallHandle("strerror", FunctionDescriptor.of(ADDRESS, JAVA_INT)); + private static final MethodHandle getrlimit$mh = downcallHandleWithErrno( + "getrlimit", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS) + ); + + static final MemorySegment errnoState = Arena.ofAuto().allocate(CAPTURE_ERRNO_LAYOUT); + + static MethodHandle downcallHandleWithErrno(String function, FunctionDescriptor functionDescriptor) { + return downcallHandle(function, functionDescriptor, CAPTURE_ERRNO_OPTION); + } + + @Override + public int errno() { + return (int) errno$vh.get(errnoState); + } + + @Override + public String strerror(int errno) { + try { + MemorySegment str = (MemorySegment) strerror$mh.invokeExact(errno); + return str.reinterpret(Long.MAX_VALUE).getUtf8String(0); + } catch (Throwable t) { + throw new AssertionError(t); + } + } @Override public int geteuid() { @@ -32,4 +73,48 @@ public int geteuid() { throw new AssertionError(t); } } + + @Override + public RLimit newRLimit() { + return new JdkRLimit(); + } + + @Override + public int getrlimit(int resource, RLimit rlimit) { + assert rlimit instanceof JdkRLimit; + var jdkRlimit = (JdkRLimit) rlimit; + try { + return (int) getrlimit$mh.invokeExact(errnoState, resource, jdkRlimit.segment); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + static class JdkRLimit implements RLimit { + private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_LONG, JAVA_LONG); + private static final VarHandle rlim_cur$vh = varHandleDropOffset(layout.varHandle(groupElement(0))); + private static final VarHandle rlim_max$vh = varHandleDropOffset(layout.varHandle(groupElement(1))); + + private final MemorySegment segment; + + JdkRLimit() { + var arena = Arena.ofAuto(); + this.segment = arena.allocate(layout); + } + + @Override + public long rlim_cur() { + return (long) rlim_cur$vh.get(segment); + } + + @Override + public long rlim_max() { + return (long) rlim_max$vh.get(segment); + } + + @Override + public String toString() { + return "JdkRLimit[rlim_cur=" + rlim_cur() + ", rlim_max=" + rlim_max() + "]"; + } + } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index 6ac0243c3befe..e1534cf1d9382 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -10,6 +10,7 @@ import java.lang.foreign.Arena; import java.lang.foreign.MemorySegment; +import java.lang.invoke.VarHandle; /** * Utility methods to act on MemorySegment apis which have changed in subsequent JDK releases. @@ -24,5 +25,11 @@ static MemorySegment allocateString(Arena arena, String s) { return arena.allocateUtf8String(s); } + // MemorySegment.varHandle changed between 21 and 22. The resulting varHandle now requires an additional + long offset parameter. We omit the offset at runtime, instead binding to the VarHandle in JDK 22. 
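// Editor's illustration (hypothetical, not part of the change): with the rlimit layout
// above, the JDK 21 struct VarHandle is invoked as rlim_cur$vh.get(segment), while the
// JDK 22 handle takes an extra leading long coordinate, rlim_cur$vh.get(segment, 0L).
// The main22 variant pre-binds that 0L via MethodHandles.insertCoordinates so JdkRLimit
// can use the single-coordinate call shape on both JDKs.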
+ static VarHandle varHandleDropOffset(VarHandle varHandle) { + return varHandle; + } + private MemorySegmentUtil() {} } diff --git a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index 59bb57d174009..db86d0d1734eb 100644 --- a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -10,8 +10,10 @@ import java.lang.foreign.Arena; import java.lang.foreign.MemorySegment; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; -public class MemorySegmentUtil { +class MemorySegmentUtil { static String getString(MemorySegment segment, long offset) { return segment.getString(offset); @@ -21,5 +23,11 @@ static MemorySegment allocateString(Arena arena, String s) { return arena.allocateFrom(s); } + // MemorySegment.varHandle changed between 21 and 22. The resulting varHandle now requires an additional + // long offset parameter. We omit the offset at runtime, instead binding to the VarHandle in JDK 22. + static VarHandle varHandleDropOffset(VarHandle varHandle) { + return MethodHandles.insertCoordinates(varHandle, 1, 0L); + } + private MemorySegmentUtil() {} } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ProcessLimitsTests.java similarity index 65% rename from qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java rename to libs/native/src/test/java/org/elasticsearch/nativeaccess/ProcessLimitsTests.java index 8c4326082d509..5750c95e8892d 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ProcessLimitsTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.bootstrap; +package org.elasticsearch.nativeaccess; import org.apache.lucene.util.Constants; import org.elasticsearch.core.PathUtils; @@ -16,11 +16,13 @@ import java.nio.file.Files; import java.util.List; -import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -public class EvilJNANativesTests extends ESTestCase { +@ESTestCase.WithoutSecurityManager +public class ProcessLimitsTests extends ESTestCase { + + private static final NativeAccess nativeAccess = NativeAccess.instance(); public void testSetMaximumNumberOfThreads() throws IOException { if (Constants.LINUX) { @@ -28,14 +30,14 @@ public void testSetMaximumNumberOfThreads() throws IOException { for (final String line : lines) { if (line != null && line.startsWith("Max processes")) { final String[] fields = line.split("\\s+"); - final long limit = "unlimited".equals(fields[2]) ? JNACLibrary.RLIM_INFINITY : Long.parseLong(fields[2]); - assertThat(JNANatives.MAX_NUMBER_OF_THREADS, equalTo(limit)); + final long limit = "unlimited".equals(fields[2]) ? 
ProcessLimits.UNLIMITED : Long.parseLong(fields[2]); + assertThat(nativeAccess.getProcessLimits().maxThreads(), equalTo(limit)); return; } } fail("should have read max processes from /proc/self/limits"); } else { - assertThat(JNANatives.MAX_NUMBER_OF_THREADS, equalTo(-1L)); + assertThat(nativeAccess.getProcessLimits().maxThreads(), equalTo(-1L)); } } @@ -45,16 +47,16 @@ public void testSetMaxSizeVirtualMemory() throws IOException { for (final String line : lines) { if (line != null && line.startsWith("Max address space")) { final String[] fields = line.split("\\s+"); - final String limit = fields[3]; - assertThat(JNANatives.rlimitToString(JNANatives.MAX_SIZE_VIRTUAL_MEMORY), equalTo(limit)); + final long limit = "unlimited".equals(fields[3]) ? ProcessLimits.UNLIMITED : Long.parseLong(fields[3]); + assertThat(nativeAccess.getProcessLimits().maxVirtualMemorySize(), equalTo(limit)); return; } } fail("should have read max size virtual memory from /proc/self/limits"); } else if (Constants.MAC_OS_X) { - assertThat(JNANatives.MAX_SIZE_VIRTUAL_MEMORY, anyOf(equalTo(Long.MIN_VALUE), greaterThanOrEqualTo(0L))); + assertThat(nativeAccess.getProcessLimits().maxVirtualMemorySize(), greaterThanOrEqualTo(0L)); } else { - assertThat(JNANatives.MAX_SIZE_VIRTUAL_MEMORY, equalTo(Long.MIN_VALUE)); + assertThat(nativeAccess.getProcessLimits().maxVirtualMemorySize(), equalTo(ProcessLimits.UNKNOWN)); } } @@ -64,16 +66,16 @@ public void testSetMaxFileSize() throws IOException { for (final String line : lines) { if (line != null && line.startsWith("Max file size")) { final String[] fields = line.split("\\s+"); - final String limit = fields[3]; - assertThat(JNANatives.rlimitToString(JNANatives.MAX_FILE_SIZE), equalTo(limit)); + final long limit = "unlimited".equals(fields[3]) ? ProcessLimits.UNLIMITED : Long.parseLong(fields[3]); + assertThat(nativeAccess.getProcessLimits().maxFileSize(), equalTo(limit)); return; } } fail("should have read max file size from /proc/self/limits"); } else if (Constants.MAC_OS_X) { - assertThat(JNANatives.MAX_FILE_SIZE, anyOf(equalTo(Long.MIN_VALUE), greaterThanOrEqualTo(0L))); + assertThat(nativeAccess.getProcessLimits().maxFileSize(), greaterThanOrEqualTo(0L)); } else { - assertThat(JNANatives.MAX_FILE_SIZE, equalTo(Long.MIN_VALUE)); + assertThat(nativeAccess.getProcessLimits().maxFileSize(), equalTo(ProcessLimits.UNKNOWN)); } } diff --git a/libs/plugin-scanner/build.gradle b/libs/plugin-scanner/build.gradle index fbe9c02092577..2f7ab5c22b967 100644 --- a/libs/plugin-scanner/build.gradle +++ b/libs/plugin-scanner/build.gradle @@ -19,8 +19,8 @@ dependencies { api project(':libs:elasticsearch-plugin-api') api project(":libs:elasticsearch-x-content") - api 'org.ow2.asm:asm:9.6' - api 'org.ow2.asm:asm-tree:9.6' + api 'org.ow2.asm:asm:9.7' + api 'org.ow2.asm:asm-tree:9.7' testImplementation "junit:junit:${versions.junit}" testImplementation(project(":test:framework")) { diff --git a/libs/vec/src/main/java/org/elasticsearch/vec/VectorScorer.java b/libs/vec/src/main/java/org/elasticsearch/vec/VectorScorer.java index b346c73cdb1f3..42165fe9f5905 100644 --- a/libs/vec/src/main/java/org/elasticsearch/vec/VectorScorer.java +++ b/libs/vec/src/main/java/org/elasticsearch/vec/VectorScorer.java @@ -22,4 +22,6 @@ public interface VectorScorer { /** The maximum ordinal of vector this scorer can score. 
*/ int maxOrd(); + VectorScorer copy(); + } diff --git a/libs/vec/src/main/java/org/elasticsearch/vec/VectorScorerSupplierAdapter.java b/libs/vec/src/main/java/org/elasticsearch/vec/VectorScorerSupplierAdapter.java index a961607f2305e..ed585d7846530 100644 --- a/libs/vec/src/main/java/org/elasticsearch/vec/VectorScorerSupplierAdapter.java +++ b/libs/vec/src/main/java/org/elasticsearch/vec/VectorScorerSupplierAdapter.java @@ -41,6 +41,6 @@ public int maxOrd() { @Override public RandomVectorScorerSupplier copy() throws IOException { - return this; // no need to copy, thread-safe + return new VectorScorerSupplierAdapter(scorer.copy()); } } diff --git a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java index 5231bb8e3c67f..16be864cb8d92 100644 --- a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java +++ b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java @@ -54,4 +54,9 @@ public float score(int firstOrd, int secondOrd) throws IOException { return Math.max(fallbackScore(firstByteOffset, secondByteOffset), 0f); } } + + @Override + public Int7DotProduct copy() { + return new Int7DotProduct(dims, maxOrd, scoreCorrectionConstant, input.clone()); + } } diff --git a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7Euclidean.java b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7Euclidean.java index 55b08a899bd7c..3bed20b70e494 100644 --- a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7Euclidean.java +++ b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7Euclidean.java @@ -48,4 +48,9 @@ public float score(int firstOrd, int secondOrd) throws IOException { return fallbackScore(firstByteOffset, secondByteOffset); } } + + @Override + public Int7Euclidean copy() { + return new Int7Euclidean(dims, maxOrd, scoreCorrectionConstant, input.clone()); + } } diff --git a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7MaximumInnerProduct.java b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7MaximumInnerProduct.java index 5cdfc62bc9071..e6045f951cac3 100644 --- a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7MaximumInnerProduct.java +++ b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7MaximumInnerProduct.java @@ -64,4 +64,9 @@ static float scaleMaxInnerProductScore(float rawSimilarity) { } return rawSimilarity + 1; } + + @Override + public Int7MaximumInnerProduct copy() { + return new Int7MaximumInnerProduct(dims, maxOrd, scoreCorrectionConstant, input.clone()); + } } diff --git a/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java b/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java index 07d30a887c683..8c010295764d5 100644 --- a/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java +++ b/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java @@ -15,14 +15,23 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.util.hnsw.RandomVectorScorer; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Objects; +import java.util.Optional; import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import 
java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.IntStream; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; import static org.elasticsearch.vec.VectorSimilarityType.COSINE; import static org.elasticsearch.vec.VectorSimilarityType.DOT_PRODUCT; import static org.elasticsearch.vec.VectorSimilarityType.EUCLIDEAN; @@ -327,6 +336,78 @@ public void testLarge() throws IOException { } } + public void testRace() throws Exception { + testRaceImpl(COSINE); + testRaceImpl(DOT_PRODUCT); + testRaceImpl(EUCLIDEAN); + testRaceImpl(MAXIMUM_INNER_PRODUCT); + } + + // Tests that copies in threads do not interfere with each other + void testRaceImpl(VectorSimilarityType sim) throws Exception { + assumeTrue(notSupportedMsg(), supported()); + var factory = AbstractVectorTestCase.factory.get(); + + final long maxChunkSize = 32; + final int dims = 34; // dimensions that are larger than the chunk size, to force fallback + byte[] vec1 = new byte[dims]; + byte[] vec2 = new byte[dims]; + IntStream.range(0, dims).forEach(i -> vec1[i] = 1); + IntStream.range(0, dims).forEach(i -> vec2[i] = 2); + try (Directory dir = new MMapDirectory(createTempDir("testRace"), maxChunkSize)) { + String fileName = getTestName() + "-" + dims; + try (IndexOutput out = dir.createOutput(fileName, IOContext.DEFAULT)) { + var one = floatToByteArray(1f); + byte[] bytes = concat(vec1, one, vec1, one, vec2, one, vec2, one); + out.writeBytes(bytes, 0, bytes.length); + } + var expectedScore1 = luceneScore(sim, vec1, vec1, 1, 1, 1); + var expectedScore2 = luceneScore(sim, vec2, vec2, 1, 1, 1); + + try (IndexInput in = dir.openInput(fileName, IOContext.DEFAULT)) { + var scoreSupplier = factory.getInt7ScalarQuantizedVectorScorer(dims, 4, 1, sim, in).get(); + var scorer = new VectorScorerSupplierAdapter(scoreSupplier); + var tasks = List.<Callable<Optional<Throwable>>>of( + new ScoreCallable(scorer.copy().scorer(0), 1, expectedScore1), + new ScoreCallable(scorer.copy().scorer(2), 3, expectedScore2) + ); + var executor = Executors.newFixedThreadPool(2); + var results = executor.invokeAll(tasks); + executor.shutdown(); + assertTrue(executor.awaitTermination(60, TimeUnit.SECONDS)); + assertThat(results.stream().filter(Predicate.not(Future::isDone)).count(), equalTo(0L)); + for (var res : results) { + assertThat("Unexpected exception" + res.get(), res.get(), isEmpty()); + } + } + } + } + + static class ScoreCallable implements Callable<Optional<Throwable>> { + + final RandomVectorScorer scorer; + final int ord; + final float expectedScore; + + ScoreCallable(RandomVectorScorer scorer, int ord, float expectedScore) { + this.scorer = scorer; + this.ord = ord; + this.expectedScore = expectedScore; + } + + @Override + public Optional<Throwable> call() throws Exception { + try { + for (int i = 0; i < 100; i++) { + assertThat(scorer.score(ord), equalTo(expectedScore)); + } + } catch (Throwable t) { + return Optional.of(t); + } + return Optional.empty(); + } + } + // creates the vector based on the given ordinal, which is reproducible given the ord and dims static byte[] vector(int ord, int dims) { var random = new Random(Objects.hash(ord, dims)); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/15_info_ingest.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/15_info_ingest.yml index dda1e7acce3b5..a48b188e23064 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/15_info_ingest.yml +++
b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/15_info_ingest.yml @@ -90,8 +90,8 @@ teardown: - gte: { ingest.pipelines.ingest_info_pipeline.time_in_millis: 0 } - match: { ingest.pipelines.ingest_info_pipeline.current: 0 } - match: { ingest.pipelines.ingest_info_pipeline.failed: 0 } - - gt: { ingest.pipelines.ingest_info_pipeline.ingested_in_bytes: 0 } - - gt: { ingest.pipelines.ingest_info_pipeline.produced_in_bytes: 0 } + - gt: { ingest.pipelines.ingest_info_pipeline.ingested_as_first_pipeline_in_bytes: 0 } + - gt: { ingest.pipelines.ingest_info_pipeline.produced_as_first_pipeline_in_bytes: 0 } # Processors section - is_true: ingest.pipelines.ingest_info_pipeline.processors.0.set @@ -129,8 +129,8 @@ teardown: cluster.info: target: [ ingest ] - match: { ingest.pipelines.pipeline-1.failed: 1 } - - gt: { ingest.pipelines.pipeline-1.ingested_in_bytes: 0 } - - match: { ingest.pipelines.pipeline-1.produced_in_bytes: 0 } + - gt: { ingest.pipelines.pipeline-1.ingested_as_first_pipeline_in_bytes: 0 } + - match: { ingest.pipelines.pipeline-1.produced_as_first_pipeline_in_bytes: 0 } --- "Test drop processor": @@ -156,8 +156,8 @@ teardown: - do: cluster.info: target: [ ingest ] - - gt: { ingest.pipelines.pipeline-1.ingested_in_bytes: 0 } - - match: { ingest.pipelines.pipeline-1.produced_in_bytes: 0 } + - gt: { ingest.pipelines.pipeline-1.ingested_as_first_pipeline_in_bytes: 0 } + - match: { ingest.pipelines.pipeline-1.produced_as_first_pipeline_in_bytes: 0 } --- "Test that pipeline processor has byte stats recorded in first pipeline": @@ -198,7 +198,6 @@ teardown: - do: bulk: refresh: true - index: index-foo body: - '{"index": { "_index": "an-index", "_id": 1 }}' - '{"some-field": 1 }' @@ -211,11 +210,11 @@ teardown: - do: cluster.info: target: [ ingest ] - - gt: { ingest.pipelines.pipeline-1.ingested_in_bytes: 0 } - - set: { ingest.pipelines.pipeline-1.ingested_in_bytes: ingest_bytes } - - gt: { ingest.pipelines.pipeline-1.produced_in_bytes: $ingest_bytes } - - match: { ingest.pipelines.pipeline-2.ingested_in_bytes: null } - - match: { ingest.pipelines.pipeline-2.produced_in_bytes: null } + - gt: { ingest.pipelines.pipeline-1.ingested_as_first_pipeline_in_bytes: 0 } + - set: { ingest.pipelines.pipeline-1.ingested_as_first_pipeline_in_bytes: ingest_bytes } + - gt: { ingest.pipelines.pipeline-1.produced_as_first_pipeline_in_bytes: $ingest_bytes } + - match: { ingest.pipelines.pipeline-2.ingested_as_first_pipeline_in_bytes: 0 } + - match: { ingest.pipelines.pipeline-2.produced_as_first_pipeline_in_bytes: 0 } --- "Test that final pipeline has byte stats recorded in first pipeline": @@ -251,7 +250,6 @@ teardown: - do: bulk: refresh: true - index: index-foo body: - '{"index": { "_index": "an-index", "_id": 1 }}' - '{"some-field": 1 }' @@ -264,12 +262,11 @@ teardown: - do: cluster.info: target: [ ingest ] - - gt: { ingest.pipelines.pipeline-1.ingested_in_bytes: 0 } - - set: { ingest.pipelines.pipeline-1.ingested_in_bytes: ingest_bytes } - - gt: { ingest.pipelines.pipeline-1.produced_in_bytes: $ingest_bytes } - - match: { ingest.pipelines.pipeline-2.ingested_in_bytes: null } - - match: { ingest.pipelines.pipeline-2.produced_in_bytes: null } - + - gt: { ingest.pipelines.pipeline-1.ingested_as_first_pipeline_in_bytes: 0 } + - set: { ingest.pipelines.pipeline-1.ingested_as_first_pipeline_in_bytes: ingest_bytes } + - gt: { ingest.pipelines.pipeline-1.produced_as_first_pipeline_in_bytes: $ingest_bytes } + - match: { ingest.pipelines.pipeline-2.ingested_as_first_pipeline_in_bytes: 0 
} + - match: { ingest.pipelines.pipeline-2.produced_as_first_pipeline_in_bytes: 0 } --- "Test that reroute processor has byte stats recorded in first pipeline": @@ -330,8 +327,41 @@ teardown: - do: cluster.info: target: [ ingest ] - - gt: { ingest.pipelines.pipeline-1.ingested_in_bytes: 0 } - - set: { ingest.pipelines.pipeline-1.ingested_in_bytes: ingest_bytes } - - gt: { ingest.pipelines.pipeline-1.produced_in_bytes: $ingest_bytes } - - match: { ingest.pipelines.pipeline-2.ingested_in_bytes: null } - - match: { ingest.pipelines.pipeline-2.produced_in_bytes: null } + - gt: { ingest.pipelines.pipeline-1.ingested_as_first_pipeline_in_bytes: 0 } + - set: { ingest.pipelines.pipeline-1.ingested_as_first_pipeline_in_bytes: ingest_bytes } + - gt: { ingest.pipelines.pipeline-1.produced_as_first_pipeline_in_bytes: $ingest_bytes } + - match: { ingest.pipelines.pipeline-2.ingested_as_first_pipeline_in_bytes: 0 } + - match: { ingest.pipelines.pipeline-2.produced_as_first_pipeline_in_bytes: 0 } + +--- +"Test human readable byte stat fields": + - do: + ingest.put_pipeline: + id: "pipeline-1" + body: > + { + "processors": [ + { + "set": { + "field": "added-field", + "value": true + } + } + ] + } + - do: + bulk: + refresh: true + body: + - '{"index": { "_index": "an-index", "_id": 1, "pipeline": "pipeline-1"}}' + - '{"some-field": 1 }' + - do: + cluster.info: + target: [ ingest ] + human: true + + - match: { ingest.pipelines.pipeline-1.count: 1 } + - gt: { ingest.pipelines.pipeline-1.ingested_as_first_pipeline_in_bytes: 0 } + - gt: { ingest.pipelines.pipeline-1.produced_as_first_pipeline_in_bytes: 0 } + - is_true: ingest.pipelines.pipeline-1.ingested_as_first_pipeline + - is_true: ingest.pipelines.pipeline-1.produced_as_first_pipeline diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml index b2cbb352448ab..53229290da03e 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml @@ -6,11 +6,11 @@ teardown: ignore: 404 - do: ingest.delete_pipeline: - id: "logs-router-default" + id: "logs-router" ignore: 404 - do: ingest.delete_pipeline: - id: "logs-nginx-default" + id: "logs-nginx" ignore: 404 - do: indices.delete_index_template: @@ -20,6 +20,22 @@ teardown: indices.delete_index_template: name: logs-nginx ignore: 404 + - do: + indices.delete: + index: "index-1" + ignore_unavailable: true + - do: + indices.delete: + index: "index-2" + ignore_unavailable: true + - do: + ingest.delete_pipeline: + id: "pipeline-1" + ignore: 404 + - do: + ingest.delete_pipeline: + id: "pipeline-2" + ignore: 404 --- "Test first matching router terminates pipeline": @@ -28,7 +44,7 @@ teardown: - do: ingest.put_pipeline: id: "pipeline-with-two-data-stream-processors" - body: > + body: > { "processors": [ { @@ -71,9 +87,15 @@ teardown: - do: ingest.put_pipeline: id: "logs-router" - body: > + body: > { "processors": [ + { + "set" : { + "field": "added-in-pipeline-before-first-reroute", + "value": true + } + }, { "reroute" : { "tag": "nginx", @@ -91,15 +113,23 @@ teardown: name: logs-router body: index_patterns: [ "logs-router-*" ] + priority: 500 + data_stream: { } template: settings: index.default_pipeline: "logs-router" - do: ingest.put_pipeline: id: "logs-nginx" - body: > + body: > { "processors": [ + { + 
"set" : { + "field": "added-in-pipeline-before-second-reroute", + "value": true + } + }, { "reroute": { "tag": "nginx.access", @@ -117,13 +147,24 @@ teardown: ] } - match: { acknowledged: true } + - do: + allowed_warnings: + - "index template [logs-nginx] has index patterns [logs-nginx-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-nginx] will take precedence during new index creation" + indices.put_index_template: + name: logs-nginx + body: + index_patterns: [ "logs-nginx-*" ] + priority: 500 + data_stream: { } + template: + settings: + index.default_pipeline: "logs-nginx" - do: index: refresh: true - index: logs-nginx-default + index: logs-router-default id: "example-log" - pipeline: "logs-nginx" op_type: create body: "@timestamp": "2022-04-13" @@ -139,3 +180,75 @@ teardown: query: match: {"_id": "example-log"} - match: { hits.hits.0._source.message: "this is an error log" } + - match: { hits.hits.0._source.added-in-pipeline-before-first-reroute: true } + - match: { hits.hits.0._source.added-in-pipeline-before-second-reroute: true } + +--- +"Test pipeline run after reroute": + - do: + ingest.put_pipeline: + id: "pipeline-1" + body: > + { + "processors": [ + { + "set" : { + "field": "added-in-pipeline-before-reroute", + "value": true + } + }, + { + "reroute" : { + "destination": "index-2" + } + } + ] + } + - match: { acknowledged: true } + - do: + ingest.put_pipeline: + id: "pipeline-2" + body: > + { + "processors": [ + { + "set" : { + "field": "added-in-pipeline-after-reroute", + "value": true + } + } + ] + } + - match: { acknowledged: true } + - do: + indices.create: + index: index-1 + body: + settings: + index: + default_pipeline: "pipeline-1" + - match: { acknowledged: true } + - do: + indices.create: + index: index-2 + body: + settings: + index: + default_pipeline: "pipeline-2" + - match: { acknowledged: true } + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "index-1", "_id": "1" }}' + - '{"existing-field": true}' + - do: + indices.refresh: + index: [index-2] + - do: + get: + index: index-2 + id: "1" + - match: { _source.existing-field : true } + - match: { _source.added-in-pipeline-before-reroute : true } + - match: { _source.added-in-pipeline-after-reroute : true } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java index fe7f03529a421..aa8656dc14d91 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java @@ -30,6 +30,7 @@ enum Database { Set.of( Property.IP, Property.COUNTRY_ISO_CODE, + Property.CONTINENT_CODE, Property.COUNTRY_NAME, Property.CONTINENT_NAME, Property.REGION_ISO_CODE, @@ -49,7 +50,7 @@ enum Database { ) ), Country( - Set.of(Property.IP, Property.CONTINENT_NAME, Property.COUNTRY_NAME, Property.COUNTRY_ISO_CODE), + Set.of(Property.IP, Property.CONTINENT_CODE, Property.CONTINENT_NAME, Property.COUNTRY_NAME, Property.COUNTRY_ISO_CODE), Set.of(Property.CONTINENT_NAME, Property.COUNTRY_NAME, Property.COUNTRY_ISO_CODE) ), Asn( @@ -82,6 +83,7 @@ enum Database { Property.IP, Property.COUNTRY_ISO_CODE, Property.COUNTRY_NAME, + Property.CONTINENT_CODE, Property.CONTINENT_NAME, Property.REGION_ISO_CODE, Property.REGION_NAME, @@ -235,6 +237,7 @@ enum Property { IP, COUNTRY_ISO_CODE, COUNTRY_NAME, + CONTINENT_CODE, CONTINENT_NAME, REGION_ISO_CODE, 
REGION_NAME, diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index 8e7f5d575378d..e39705a71f56c 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -234,6 +234,12 @@ private Map retrieveCityGeoData(GeoIpDatabase geoIpDatabase, Ine geoData.put("country_name", countryName); } } + case CONTINENT_CODE -> { + String continentCode = continent.getCode(); + if (continentCode != null) { + geoData.put("continent_code", continentCode); + } + } case CONTINENT_NAME -> { String continentName = continent.getName(); if (continentName != null) { @@ -307,6 +313,12 @@ private Map retrieveCountryGeoData(GeoIpDatabase geoIpDatabase, geoData.put("country_name", countryName); } } + case CONTINENT_CODE -> { + String continentCode = continent.getCode(); + if (continentCode != null) { + geoData.put("continent_code", continentCode); + } + } case CONTINENT_NAME -> { String continentName = continent.getName(); if (continentName != null) { @@ -485,6 +497,12 @@ private Map retrieveEnterpriseGeoData(GeoIpDatabase geoIpDatabas geoData.put("country_name", countryName); } } + case CONTINENT_CODE -> { + String continentCode = continent.getCode(); + if (continentCode != null) { + geoData.put("continent_code", continentCode); + } + } case CONTINENT_NAME -> { String continentName = continent.getName(); if (continentName != null) { diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 99330224451ca..663ae1152246a 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -196,7 +196,7 @@ public void testBuildWithCountryDbAndAsnFields() throws Exception { equalTo( "[properties] illegal property value [" + asnProperty - + "]. valid values are [IP, COUNTRY_ISO_CODE, COUNTRY_NAME, CONTINENT_NAME]" + + "]. valid values are [IP, COUNTRY_ISO_CODE, COUNTRY_NAME, CONTINENT_CODE, CONTINENT_NAME]" ) ); } @@ -278,7 +278,7 @@ public void testBuildIllegalFieldOption() throws Exception { e.getMessage(), equalTo( "[properties] illegal property value [invalid]. 
valid values are [IP, COUNTRY_ISO_CODE, " - + "COUNTRY_NAME, CONTINENT_NAME, REGION_ISO_CODE, REGION_NAME, CITY_NAME, TIMEZONE, LOCATION]" + + "COUNTRY_NAME, CONTINENT_CODE, CONTINENT_NAME, REGION_ISO_CODE, REGION_NAME, CITY_NAME, TIMEZONE, LOCATION]" ) ); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java index 6eb4e9b1acb51..6276155d9f083 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java @@ -80,10 +80,11 @@ public void testCity() throws Exception { assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo("8.8.8.8")); @SuppressWarnings("unchecked") Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field"); - assertThat(geoData.size(), equalTo(6)); + assertThat(geoData.size(), equalTo(7)); assertThat(geoData.get("ip"), equalTo("8.8.8.8")); assertThat(geoData.get("country_iso_code"), equalTo("US")); assertThat(geoData.get("country_name"), equalTo("United States")); + assertThat(geoData.get("continent_code"), equalTo("NA")); assertThat(geoData.get("continent_name"), equalTo("North America")); assertThat(geoData.get("timezone"), equalTo("America/Chicago")); Map<String, Object> location = new HashMap<>(); @@ -197,10 +198,11 @@ public void testCity_withIpV6() throws Exception { assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(address)); @SuppressWarnings("unchecked") Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field"); - assertThat(geoData.size(), equalTo(9)); + assertThat(geoData.size(), equalTo(10)); assertThat(geoData.get("ip"), equalTo(address)); assertThat(geoData.get("country_iso_code"), equalTo("US")); assertThat(geoData.get("country_name"), equalTo("United States")); + assertThat(geoData.get("continent_code"), equalTo("NA")); assertThat(geoData.get("continent_name"), equalTo("North America")); assertThat(geoData.get("region_iso_code"), equalTo("US-FL")); assertThat(geoData.get("region_name"), equalTo("Florida")); @@ -260,10 +262,11 @@ public void testCountry() throws Exception { assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo("82.170.213.79")); @SuppressWarnings("unchecked") Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field"); - assertThat(geoData.size(), equalTo(4)); + assertThat(geoData.size(), equalTo(5)); assertThat(geoData.get("ip"), equalTo("82.170.213.79")); assertThat(geoData.get("country_iso_code"), equalTo("NL")); assertThat(geoData.get("country_name"), equalTo("Netherlands")); + assertThat(geoData.get("continent_code"), equalTo("EU")); assertThat(geoData.get("continent_name"), equalTo("Europe")); } @@ -435,10 +438,11 @@ public void testEnterprise() throws Exception { assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(ip)); @SuppressWarnings("unchecked") Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field"); - assertThat(geoData.size(), equalTo(23)); + assertThat(geoData.size(), equalTo(24)); assertThat(geoData.get("ip"), equalTo(ip)); assertThat(geoData.get("country_iso_code"), equalTo("US")); assertThat(geoData.get("country_name"), equalTo("United States")); + assertThat(geoData.get("continent_code"), equalTo("NA")); assertThat(geoData.get("continent_name"), equalTo("North America"));
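// A note on the pattern in these tests: every geoData.size() bump accounts for the single
// new CONTINENT_CODE entry. A minimal sketch of the pair now emitted, mirroring the
// GeoIpProcessor change earlier in this diff (values are the ones asserted here, e.g. for 8.8.8.8):
//   geoData.put("continent_code", continent.getCode());   // e.g. "NA"
//   geoData.put("continent_name", continent.getName());   // e.g. "North America"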
assertThat(geoData.get("region_iso_code"), equalTo("US-NY")); assertThat(geoData.get("region_name"), equalTo("New York")); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java index a465ae7cd799d..eb958ef0ced80 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java @@ -75,6 +75,7 @@ public class MaxMindSupportTests extends ESTestCase { private static final Set CITY_SUPPORTED_FIELDS = Set.of( "city.name", + "continent.code", "continent.name", "country.isoCode", "country.name", @@ -88,7 +89,6 @@ public class MaxMindSupportTests extends ESTestCase { "city.confidence", "city.geoNameId", "city.names", - "continent.code", "continent.geoNameId", "continent.names", "country.confidence", @@ -156,9 +156,13 @@ public class MaxMindSupportTests extends ESTestCase { private static final Set CONNECT_TYPE_SUPPORTED_FIELDS = Set.of("connectionType"); private static final Set CONNECT_TYPE_UNSUPPORTED_FIELDS = Set.of("ipAddress", "network"); - private static final Set COUNTRY_SUPPORTED_FIELDS = Set.of("continent.name", "country.isoCode", "country.name"); - private static final Set COUNTRY_UNSUPPORTED_FIELDS = Set.of( + private static final Set COUNTRY_SUPPORTED_FIELDS = Set.of( + "continent.name", + "country.isoCode", "continent.code", + "country.name" + ); + private static final Set COUNTRY_UNSUPPORTED_FIELDS = Set.of( "continent.geoNameId", "continent.names", "country.confidence", @@ -209,6 +213,7 @@ public class MaxMindSupportTests extends ESTestCase { private static final Set ENTERPRISE_SUPPORTED_FIELDS = Set.of( "city.name", + "continent.code", "continent.name", "country.isoCode", "country.name", @@ -238,7 +243,6 @@ public class MaxMindSupportTests extends ESTestCase { "city.confidence", "city.geoNameId", "city.names", - "continent.code", "continent.geoNameId", "continent.names", "country.confidence", diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index aee0d313e4e00..b4ec0269db9a4 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportLogger; @@ -22,15 +22,15 @@ @ESIntegTestCase.ClusterScope(numDataNodes = 2, scope = ESIntegTestCase.Scope.TEST) public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { - private MockLogAppender appender; + private MockLog mockLog; public void setUp() throws Exception { super.setUp(); - appender = MockLogAppender.capture(ESLoggingHandler.class, TransportLogger.class, TcpTransport.class); + mockLog = MockLog.capture(ESLoggingHandler.class, TransportLogger.class, TcpTransport.class); } public void tearDown() throws 
Exception { - appender.close(); + mockLog.close(); super.tearDown(); } @@ -45,14 +45,14 @@ public void testLoggingHandler() { + ", version: .*" + ", action: cluster:monitor/nodes/stats\\[n\\]\\]" + " WRITE: \\d+B"; - final MockLogAppender.LoggingExpectation writeExpectation = new MockLogAppender.PatternSeenEventExpectation( + final MockLog.LoggingExpectation writeExpectation = new MockLog.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern ); - final MockLogAppender.LoggingExpectation flushExpectation = new MockLogAppender.SeenEventExpectation( + final MockLog.LoggingExpectation flushExpectation = new MockLog.SeenEventExpectation( "flush", ESLoggingHandler.class.getCanonicalName(), Level.TRACE, @@ -66,32 +66,32 @@ public void testLoggingHandler() { + ", action: cluster:monitor/nodes/stats\\[n\\]\\]" + " READ: \\d+B"; - final MockLogAppender.LoggingExpectation readExpectation = new MockLogAppender.PatternSeenEventExpectation( + final MockLog.LoggingExpectation readExpectation = new MockLog.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern ); - appender.addExpectation(writeExpectation); - appender.addExpectation(flushExpectation); - appender.addExpectation(readExpectation); + mockLog.addExpectation(writeExpectation); + mockLog.addExpectation(flushExpectation); + mockLog.addExpectation(readExpectation); client().admin().cluster().prepareNodesStats().get(TimeValue.timeValueSeconds(10)); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } @TestLogging(value = "org.elasticsearch.transport.TcpTransport:DEBUG", reason = "to ensure we log connection events on DEBUG level") public void testConnectionLogging() throws IOException { - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "open connection log", TcpTransport.class.getCanonicalName(), Level.DEBUG, ".*opened transport connection \\[[1-9][0-9]*\\] to .*" ) ); - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "close connection log", TcpTransport.class.getCanonicalName(), Level.DEBUG, @@ -102,6 +102,6 @@ public void testConnectionLogging() throws IOException { final String nodeName = internalCluster().startNode(); internalCluster().stopNode(nodeName); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/muted-tests.yml b/muted-tests.yml index 80ae6e99040e6..155317b3b9418 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -10,6 +10,12 @@ tests: - class: "org.elasticsearch.xpack.core.ssl.SSLConfigurationReloaderTests" issue: "https://github.com/elastic/elasticsearch/issues/108774" method: "testReloadingKeyStore" +- class: "org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT" + issue: "https://github.com/elastic/elasticsearch/issues/108808" + method: "test {k8s-metrics.MetricsWithAggs}" +- class: "org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT" + issue: "https://github.com/elastic/elasticsearch/issues/108809" + method: "test {k8s-metrics.MetricsWithoutAggs}" # Examples: # # Mute a single test case in a YAML test suite: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/110_semantic_query.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/110_semantic_query.yml new file 
mode 100644 index 0000000000000..0155175f0e54a --- /dev/null +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/110_semantic_query.yml @@ -0,0 +1,37 @@ +--- +setup: + - requires: + cluster_features: "gte_v8.15.0" + reason: semantic query introduced in 8.15.0 + + - do: + indices.create: + index: test-index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 +--- +teardown: + + - do: + indices.delete: + index: test-index + ignore_unavailable: true + +--- +"Test that semantic query does not support cross-cluster search": + - do: + catch: bad_request + search: + index: "test-index,my_remote_cluster:test-index" + body: + query: + semantic: + field: "field" + query: "test query" + + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "semantic query does not support cross-cluster search" } diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index 99b2728ebfa3c..6369e02e1f605 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginTestUtil; import org.elasticsearch.test.GraalVMThreadsFilter; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -66,10 +66,10 @@ public class SpawnerNoBootstrapTests extends LuceneTestCase { static { // normally done by ESTestCase, but need here because spawner depends on logging LogConfigurator.loadLog4jPlugins(); - MockLogAppender.init(); + MockLog.init(); } - static class ExpectedStreamMessage implements MockLogAppender.LoggingExpectation { + static class ExpectedStreamMessage implements MockLog.LoggingExpectation { final String expectedLogger; final String expectedMessage; final CountDownLatch matched; @@ -210,10 +210,10 @@ private void assertControllerSpawns(final Function pluginsDir Loggers.setLevel(LogManager.getLogger(stderrLoggerName), Level.TRACE); CountDownLatch messagesLoggedLatch = new CountDownLatch(2); - try (var appender = MockLogAppender.capture(stdoutLoggerName, stderrLoggerName)) { + try (var mockLog = MockLog.capture(stdoutLoggerName, stderrLoggerName)) { if (expectSpawn) { - appender.addExpectation(new ExpectedStreamMessage(stdoutLoggerName, "I am alive", messagesLoggedLatch)); - appender.addExpectation(new ExpectedStreamMessage(stderrLoggerName, "I am an error", messagesLoggedLatch)); + mockLog.addExpectation(new ExpectedStreamMessage(stdoutLoggerName, "I am alive", messagesLoggedLatch)); + mockLog.addExpectation(new ExpectedStreamMessage(stderrLoggerName, "I am an error", messagesLoggedLatch)); } Spawner spawner = new Spawner(); @@ -233,7 +233,7 @@ private void assertControllerSpawns(final Function pluginsDir } else { assertThat(processes, is(empty())); } - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml index cdd1223d67f11..07bd372b60058 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml @@ -117,7 +117,7 @@ setup: id: "6" - match: _source: - integer_range: { "gte": -2147483648, "lte": 10 } + integer_range: { "gte": null, "lte": 10 } - do: get: @@ -125,7 +125,7 @@ setup: id: "7" - match: _source: - integer_range: { "gte": 1, "lte": 2147483647 } + integer_range: { "gte": 1, "lte": null } --- "Long range": @@ -220,7 +220,7 @@ setup: id: "6" - match: _source: - long_range: { "gte": -9223372036854775808, "lte": 10 } + long_range: { "gte": null, "lte": 10 } - do: get: @@ -228,7 +228,7 @@ setup: id: "7" - match: _source: - long_range: { "gte": 1, "lte": 9223372036854775807 } + long_range: { "gte": 1, "lte": null } --- "Float range": @@ -309,7 +309,7 @@ setup: id: "5" - match: _source: - float_range: { "gte": "-Infinity", "lte": 10.0 } + float_range: { "gte": null, "lte": 10.0 } - do: get: @@ -317,7 +317,7 @@ setup: id: "6" - match: _source: - float_range: { "gte": 1.0, "lte": "Infinity" } + float_range: { "gte": 1.0, "lte": null } --- "Double range": @@ -398,7 +398,7 @@ setup: id: "5" - match: _source: - double_range: { "gte": "-Infinity", "lte": 10.0 } + double_range: { "gte": null, "lte": 10.0 } - do: get: @@ -406,7 +406,7 @@ setup: id: "6" - match: _source: - double_range: { "gte": 1.0, "lte": "Infinity" } + double_range: { "gte": 1.0, "lte": null } --- "IP range": @@ -515,7 +515,7 @@ setup: id: "7" - match: _source: - ip_range: { "gte": "::", "lte": "10.10.10.10" } + ip_range: { "gte": null, "lte": "10.10.10.10" } - do: get: @@ -523,7 +523,7 @@ setup: id: "8" - match: _source: - ip_range: { "gte": "2001:db8::", "lte": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" } + ip_range: { "gte": "2001:db8::", "lte": null } --- "Date range": @@ -646,7 +646,7 @@ setup: id: "8" - match: _source: - date_range: { "gte": "-292275055-05-16T16:47:04.192Z", "lte": "2017-09-05T00:00:00.000Z" } + date_range: { "gte": null, "lte": "2017-09-05T00:00:00.000Z" } - do: get: @@ -654,4 +654,4 @@ setup: id: "9" - match: _source: - date_range: { "gte": "2017-09-05T00:00:00.000Z", "lte": "+292278994-08-17T07:12:55.807Z" } + date_range: { "gte": "2017-09-05T00:00:00.000Z", "lte": null } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 919d548d6498d..48f1ecb072314 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -35,7 +35,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -251,9 +251,9 @@ public void testRolloverDryRun() throws Exception { Logger allocationServiceLogger = LogManager.getLogger(AllocationService.class); final RolloverResponse response; - try (var appender = MockLogAppender.capture(AllocationService.class)) { - appender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + try (var mockLog = MockLog.capture(AllocationService.class)) { + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "no related message 
logged on dry run", AllocationService.class.getName(), Level.INFO, @@ -261,7 +261,7 @@ public void testRolloverDryRun() throws Exception { ) ); response = indicesAdmin().prepareRolloverIndex("test_alias").dryRun(true).get(); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } assertThat(response.getOldIndex(), equalTo("test_index-1")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index fcccc0051f0cd..bacff31b480ec 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -42,7 +42,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import java.nio.file.Path; import java.util.Arrays; @@ -387,9 +387,9 @@ public void testMessageLogging() { ) .get(); - try (var dryRunMockLog = MockLogAppender.capture(TransportClusterRerouteAction.class)) { + try (var dryRunMockLog = MockLog.capture(TransportClusterRerouteAction.class)) { dryRunMockLog.addExpectation( - new MockLogAppender.UnseenEventExpectation( + new MockLog.UnseenEventExpectation( "no completed message logged on dry run", TransportClusterRerouteAction.class.getName(), Level.INFO, @@ -411,9 +411,9 @@ public void testMessageLogging() { dryRunMockLog.assertAllExpectationsMatched(); } - try (var allocateMockLog = MockLogAppender.capture(TransportClusterRerouteAction.class)) { + try (var allocateMockLog = MockLog.capture(TransportClusterRerouteAction.class)) { allocateMockLog.addExpectation( - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "message for first allocate empty primary", TransportClusterRerouteAction.class.getName(), Level.INFO, @@ -421,7 +421,7 @@ public void testMessageLogging() { ) ); allocateMockLog.addExpectation( - new MockLogAppender.UnseenEventExpectation( + new MockLog.UnseenEventExpectation( "no message for second allocate empty primary", TransportClusterRerouteAction.class.getName(), Level.INFO, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index 09dd564d864db..445cbda9feb6a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -18,7 +18,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportService; @@ -138,9 +138,9 @@ public Path nodeConfigPath(int nodeOrdinal) { Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), Function.identity() ); - try (var mockAppender = MockLogAppender.capture(JoinHelper.class)) { - mockAppender.addExpectation( - new 
MockLogAppender.SeenEventExpectation("test", JoinHelper.class.getCanonicalName(), Level.INFO, "failed to join") { + try (var mockLog = MockLog.capture(JoinHelper.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation("test", JoinHelper.class.getCanonicalName(), Level.INFO, "failed to join") { @Override public boolean innerMatch(final LogEvent event) { @@ -159,7 +159,7 @@ public boolean innerMatch(final LogEvent event) { other.beforeTest(random()); final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); assertThat(first.nodes().getSize(), equalTo(1)); - assertBusy(mockAppender::assertAllExpectationsMatched); + assertBusy(mockLog::assertAllExpectationsMatched); } finally { other.close(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java index 215596c8130be..59e7a67687921 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java @@ -22,7 +22,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.concurrent.CountDownLatch; @@ -70,11 +70,11 @@ public void testShardLockFailure() throws Exception { try ( var ignored1 = internalCluster().getInstance(NodeEnvironment.class, node).shardLock(shardId, "blocked for test"); - var mockLogAppender = MockLogAppender.capture(IndicesClusterStateService.class); + var mockLog = MockLog.capture(IndicesClusterStateService.class); ) { final CountDownLatch countDownLatch = new CountDownLatch(1); - mockLogAppender.addExpectation(new MockLogAppender.LoggingExpectation() { + mockLog.addExpectation(new MockLog.LoggingExpectation() { int debugMessagesSeen = 0; int warnMessagesSeen = 0; @@ -108,7 +108,7 @@ public void assertMatched() {} ensureYellow(indexName); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); assertEquals(ClusterHealthStatus.YELLOW, clusterAdmin().prepareHealth(indexName).get().getStatus()); - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } ensureGreen(indexName); @@ -139,10 +139,10 @@ public void testShardLockTimeout() throws Exception { try ( var ignored1 = internalCluster().getInstance(NodeEnvironment.class, node).shardLock(shardId, "blocked for test"); - var mockLogAppender = MockLogAppender.capture(IndicesClusterStateService.class); + var mockLog = MockLog.capture(IndicesClusterStateService.class); ) { - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "timeout message", "org.elasticsearch.indices.cluster.IndicesClusterStateService", Level.WARN, @@ -153,7 +153,7 @@ public void testShardLockTimeout() throws Exception { ); updateIndexSettings(Settings.builder().putNull(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name"), indexName); - assertBusy(mockLogAppender::assertAllExpectationsMatched); + assertBusy(mockLog::assertAllExpectationsMatched); final var clusterHealthResponse = clusterAdmin().prepareHealth(indexName) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueSeconds(10)) diff 
--git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 2a58ef8eab3bc..09633a0ea1b3f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -54,7 +54,7 @@ import org.elasticsearch.search.DummyQueryBuilder; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xcontent.ObjectParser; @@ -648,9 +648,9 @@ public void testManyIndicesWithSameMapping() { reason = "verify the log output on cancelled" ) public void testCancel() throws Exception { - try (var logAppender = MockLogAppender.capture(TransportFieldCapabilitiesAction.class)) { - logAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(TransportFieldCapabilitiesAction.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "clear resources", TransportFieldCapabilitiesAction.class.getCanonicalName(), Level.TRACE, @@ -681,7 +681,7 @@ public void testCancel() throws Exception { } }, 30, TimeUnit.SECONDS); cancellable.cancel(); - assertBusy(logAppender::assertAllExpectationsMatched); + assertBusy(mockLog::assertAllExpectationsMatched); logger.info("--> waiting for field-caps tasks to be cancelled"); assertBusy(() -> { List<TaskInfo> tasks = clusterAdmin().prepareListTasks() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index aa47663ad3886..88c94985194fc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -56,7 +56,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.disruption.BusyMasterServiceDisruption; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.rest.FakeRestRequest; @@ -1264,9 +1264,9 @@ public void testDeleteSnapshotsOfDifferentIndexSets() throws IllegalAccessExcept final String repoName = "test-repo"; createRepository(repoName, "fs"); - try (var mockAppender = MockLogAppender.capture(BlobStoreRepository.class)) { - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation("no warnings", BlobStoreRepository.class.getCanonicalName(), Level.WARN, "*") + try (var mockLog = MockLog.capture(BlobStoreRepository.class)) { + mockLog.addExpectation( + new MockLog.UnseenEventExpectation("no warnings", BlobStoreRepository.class.getCanonicalName(), Level.WARN, "*") ); final String index1 = "index-1"; final String index2 = "index-2"; @@ -1279,7 +1279,7 @@ public void testDeleteSnapshotsOfDifferentIndexSets() throws IllegalAccessExcept createSnapshot(repoName, snapshot2,
List.of(index2)); clusterAdmin().prepareDeleteSnapshot(repoName, snapshot1, snapshot2).get(); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index c9d77d7e41f16..7aa1603735afe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -29,7 +29,7 @@ import org.elasticsearch.repositories.blobstore.FileRestoreContext; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xcontent.XContentFactory; @@ -162,7 +162,7 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { value = "org.elasticsearch.snapshots.RestoreService:INFO" ) public void testRestoreLogging() throws IllegalAccessException { - try (var mockLogAppender = MockLogAppender.capture(RestoreService.class)) { + try (var mockLog = MockLog.capture(RestoreService.class)) { String indexName = "testindex"; String repoName = "test-restore-snapshot-repo"; String snapshotName = "test-restore-snapshot"; @@ -171,8 +171,8 @@ public void testRestoreLogging() throws IllegalAccessException { String restoredIndexName = indexName + "-restored"; String expectedValue = "expected"; - mockLogAppender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "not seen start of snapshot restore", "org.elasticsearch.snapshots.RestoreService", Level.INFO, @@ -180,8 +180,8 @@ public void testRestoreLogging() throws IllegalAccessException { ) ); - mockLogAppender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "not seen completion of snapshot restore", "org.elasticsearch.snapshots.RestoreService", Level.INFO, @@ -207,7 +207,7 @@ public void testRestoreLogging() throws IllegalAccessException { assertThat(restoreSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED)); ensureGreen(restoredIndexName); assertThat(client.prepareGet(restoredIndexName, docId).get().isExists(), equalTo(true)); - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } @@ -898,9 +898,9 @@ public void testNoWarningsOnRestoreOverClosedIndex() throws IllegalAccessExcepti index(indexName, "some_id", Map.of("foo", "bar")); assertAcked(indicesAdmin().prepareClose(indexName).get()); - try (var mockAppender = MockLogAppender.capture(FileRestoreContext.class)) { - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation("no warnings", FileRestoreContext.class.getCanonicalName(), Level.WARN, "*") + try (var mockLog = MockLog.capture(FileRestoreContext.class)) { + mockLog.addExpectation( + new MockLog.UnseenEventExpectation("no warnings", FileRestoreContext.class.getCanonicalName(), Level.WARN, "*") ); final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) @@ -909,7 +909,7 @@ public void testNoWarningsOnRestoreOverClosedIndex() throws IllegalAccessExcepti .setWaitForCompletion(true) .get(); assertEquals(0, 
restoreSnapshotResponse.getRestoreInfo().failedShards()); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java index 40dc9cbf6ff9f..4d2d310955a3d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java @@ -16,7 +16,7 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Collections; @@ -136,8 +136,8 @@ public void testWarningSpeedOverRecovery() throws Exception { } final String primaryNode = internalCluster().startNode(primaryNodeSettings); - try (var mockLogAppender = MockLogAppender.capture(BlobStoreRepository.class)) { - MockLogAppender.EventuallySeenEventExpectation snapshotExpectation = new MockLogAppender.EventuallySeenEventExpectation( + try (var mockLog = MockLog.capture(BlobStoreRepository.class)) { + MockLog.EventuallySeenEventExpectation snapshotExpectation = new MockLog.EventuallySeenEventExpectation( "snapshot speed over recovery speed", "org.elasticsearch.repositories.blobstore.BlobStoreRepository", Level.WARN, @@ -146,9 +146,9 @@ public void testWarningSpeedOverRecovery() throws Exception { + "rate limit will be superseded by the recovery rate limit" ); if (nodeBandwidthSettingsSet) snapshotExpectation.setExpectSeen(); - mockLogAppender.addExpectation(snapshotExpectation); + mockLog.addExpectation(snapshotExpectation); - MockLogAppender.SeenEventExpectation restoreExpectation = new MockLogAppender.SeenEventExpectation( + MockLog.SeenEventExpectation restoreExpectation = new MockLog.SeenEventExpectation( "snapshot restore speed over recovery speed", "org.elasticsearch.repositories.blobstore.BlobStoreRepository", Level.WARN, @@ -156,7 +156,7 @@ public void testWarningSpeedOverRecovery() throws Exception { + "the effective recovery rate limit [indices.recovery.max_bytes_per_sec=100mb] per second, thus the repository " + "rate limit will be superseded by the recovery rate limit" ); - mockLogAppender.addExpectation(restoreExpectation); + mockLog.addExpectation(restoreExpectation); createRepository( "test-repo", @@ -168,7 +168,7 @@ public void testWarningSpeedOverRecovery() throws Exception { ); deleteRepository("test-repo"); - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java index 307219bcc667e..22e7ce9e99edd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java @@ -16,7 +16,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.ClusterServiceUtils; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import java.util.List; 
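// The MockLogAppender -> MockLog change in this file repeats the rename applied throughout
// this diff; a minimal usage sketch, assuming MockLog keeps MockLogAppender's capture and
// expectation semantics (which the mechanical renames here suggest):
//   try (var mockLog = MockLog.capture(SnapshotsService.class)) {
//       mockLog.addExpectation(
//           new MockLog.SeenEventExpectation("name", SnapshotsService.class.getName(), Level.INFO, "expected message*")
//       );
//       // ... run the code under test ...
//       mockLog.assertAllExpectationsMatched();
//   }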
import java.util.concurrent.TimeUnit; @@ -31,9 +31,9 @@ public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws E createIndexWithRandomDocs("test-index", randomIntBetween(1, 42)); createSnapshot("test-repo", "test-snapshot", List.of("test-index")); - try (var mockLogAppender = MockLogAppender.capture(SnapshotsService.class)) { - mockLogAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + try (var mockLog = MockLog.capture(SnapshotsService.class)) { + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "[does-not-exist]", SnapshotsService.class.getName(), Level.INFO, @@ -41,8 +41,8 @@ public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws E ) ); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "[deleting test-snapshot]", SnapshotsService.class.getName(), Level.INFO, @@ -50,8 +50,8 @@ public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws E ) ); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "[test-snapshot deleted]", SnapshotsService.class.getName(), Level.INFO, @@ -67,7 +67,7 @@ public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws E assertThat(startDeleteSnapshot("test-repo", "test-snapshot").actionGet().isAcknowledged(), is(true)); awaitNoMoreRunningOperations(); // ensure background file deletion is completed - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { deleteRepository("test-repo"); } @@ -78,9 +78,9 @@ public void testSnapshotDeletionFailureShouldBeLogged() throws Exception { createIndexWithRandomDocs("test-index", randomIntBetween(1, 42)); createSnapshot("test-repo", "test-snapshot", List.of("test-index")); - try (var mockLogAppender = MockLogAppender.capture(SnapshotsService.class)) { - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(SnapshotsService.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "[test-snapshot]", SnapshotsService.class.getName(), Level.WARN, @@ -105,7 +105,7 @@ public void testSnapshotDeletionFailureShouldBeLogged() throws Exception { assertThat(e.getCause().getMessage(), containsString("exception after block")); } - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { deleteRepository("test-repo"); } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 24e40c1cd9115..c5d487ab639ea 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -169,6 +169,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_INFERENCE_COHERE_COMPLETION_ADDED = def(8_660_00_0); public static final TransportVersion ESQL_REMOVE_ES_SOURCE_OPTIONS = def(8_661_00_0); public static final TransportVersion NODE_STATS_INGEST_BYTES = def(8_662_00_0); + public static final TransportVersion SEMANTIC_QUERY = def(8_663_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/RequestDispatcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/RequestDispatcher.java index 3a6a2eeb08de8..6eec2f56d52f1 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/RequestDispatcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/RequestDispatcher.java @@ -92,8 +92,13 @@ final class RequestDispatcher { this.onComplete = new RunOnce(onComplete); this.indexSelectors = ConcurrentCollections.newConcurrentMap(); for (String index : indices) { - final GroupShardsIterator<ShardIterator> shardIts = clusterService.operationRouting() - .searchShards(clusterState, new String[] { index }, null, null, null, null); + final GroupShardsIterator<ShardIterator> shardIts; + try { + shardIts = clusterService.operationRouting().searchShards(clusterState, new String[] { index }, null, null, null, null); + } catch (Exception e) { + onIndexFailure.accept(index, e); + continue; + } final IndexSelector indexResult = new IndexSelector(shardIts); if (indexResult.nodeToShards.isEmpty()) { onIndexFailure.accept(index, new NoShardAvailableActionException(null, "index [" + index + "] has no active shard copy")); @@ -168,7 +173,7 @@ private void sendRequestToNode(String nodeId, List<ShardId> shardIds) { assert node != null; LOGGER.debug("round {} sends field caps node request to node {} for shardIds {}", executionRound, node, shardIds); final ActionListener<FieldCapabilitiesNodeResponse> listener = ActionListener.wrap( - r -> onRequestResponse(shardIds, r), + this::onRequestResponse, failure -> onRequestFailure(shardIds, failure) ); final FieldCapabilitiesNodeRequest nodeRequest = new FieldCapabilitiesNodeRequest( @@ -188,7 +193,11 @@ private void sendRequestToNode(String nodeId, List<ShardId> shardIds) { nodeRequest, parentTask, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, FieldCapabilitiesNodeResponse::new, executor) + new ActionListenerResponseHandler<>( + ActionListener.runAfter(listener, () -> afterRequestsCompleted(shardIds.size())), + FieldCapabilitiesNodeResponse::new, + executor + ) ); } @@ -201,7 +210,7 @@ private void afterRequestsCompleted(int numRequests) { } } - private void onRequestResponse(List<ShardId> shardIds, FieldCapabilitiesNodeResponse nodeResponse) { + private void onRequestResponse(FieldCapabilitiesNodeResponse nodeResponse) { for (FieldCapabilitiesIndexResponse indexResponse : nodeResponse.getIndexResponses()) { if (indexResponse.canMatch()) { if (fieldCapsRequest.includeEmptyFields() == false) { @@ -224,7 +233,6 @@ private void onRequestResponse(List<ShardId> shardIds, FieldCapabilitiesNodeResp indexSelector.setFailure(e.getKey(), e.getValue()); } } - afterRequestsCompleted(shardIds.size()); } private void onRequestFailure(List<ShardId> shardIds, Exception e) { @@ -234,7 +242,6 @@ private void onRequestFailure(List<ShardId> shardIds, Exception e) { indexSelector.setFailure(shardId, e); } } - afterRequestsCompleted(shardIds.size()); } private static class IndexSelector { @@ -253,14 +260,23 @@ private static class IndexSelector { synchronized Exception getFailure() { Exception first = null; for (Exception e : failures.values()) { - first = ExceptionsHelper.useOrSuppress(first, e); + first = useOrSuppressIfDifferent(first, e); + } + return first; + } + + static Exception useOrSuppressIfDifferent(Exception first, Exception second) { + if (first == null) { + return second; + } else if (ExceptionsHelper.unwrap(first) != ExceptionsHelper.unwrap(second)) { + first.addSuppressed(second); } return first; } synchronized void setFailure(ShardId
shardId, Exception failure) { assert unmatchedShardIds.contains(shardId) == false : "Shard " + shardId + " was unmatched already"; - failures.compute(shardId, (k, curr) -> ExceptionsHelper.useOrSuppress(curr, failure)); + failures.compute(shardId, (k, curr) -> useOrSuppressIfDifferent(curr, failure)); } synchronized void addUnmatchedShardId(ShardId shardId) { diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 569e5aec6eca3..f804ab31faf8e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -109,14 +109,14 @@ private void innerRun() { if (queryAndFetchOptimization) { assert assertConsistentWithQueryAndFetchOptimization(); // query AND fetch optimization - moveToNextPhase(reducedQueryPhase, queryResults); + moveToNextPhase(queryResults); } else { ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs(); // no docs to fetch -- sidestep everything and return if (scoreDocs.length == 0) { // we have to release contexts here to free up resources queryResults.asList().stream().map(SearchPhaseResult::queryResult).forEach(this::releaseIrrelevantSearchContext); - moveToNextPhase(reducedQueryPhase, fetchResults.getAtomicArray()); + moveToNextPhase(fetchResults.getAtomicArray()); } else { final ScoreDoc[] lastEmittedDocPerShard = context.getRequest().scroll() != null ? SearchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards) @@ -125,7 +125,7 @@ private void innerRun() { final CountedCollector counter = new CountedCollector<>( fetchResults, docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not - () -> moveToNextPhase(reducedQueryPhase, fetchResults.getAtomicArray()), + () -> moveToNextPhase(fetchResults.getAtomicArray()), context ); for (int i = 0; i < docIdsToLoad.length; i++) { @@ -224,15 +224,12 @@ private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) { context.getOriginalIndices(queryResult.getShardIndex()) ); } catch (Exception e) { - context.getLogger().trace("failed to release context", e); + logger.trace("failed to release context", e); } } } - private void moveToNextPhase( - SearchPhaseController.ReducedQueryPhase reducedQueryPhase, - AtomicArray fetchResultsArr - ) { + private void moveToNextPhase(AtomicArray fetchResultsArr) { var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); context.addReleasable(resp::decRef); fetchResults.close(); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index a99ed225b244b..19f19dc678e2e 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -22,6 +22,8 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.process.ProcessProbe; +import org.elasticsearch.nativeaccess.NativeAccess; +import org.elasticsearch.nativeaccess.ProcessLimits; import org.elasticsearch.node.NodeValidationException; import java.io.BufferedReader; @@ -349,7 +351,7 @@ static class MaxNumberOfThreadsCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { - if 
(getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < MAX_NUMBER_OF_THREADS_THRESHOLD) { + if (getMaxNumberOfThreads() != ProcessLimits.UNKNOWN && getMaxNumberOfThreads() < MAX_NUMBER_OF_THREADS_THRESHOLD) { final String message = String.format( Locale.ROOT, "max number of threads [%d] for user [%s] is too low, increase to at least [%d]", @@ -365,7 +367,7 @@ public BootstrapCheckResult check(BootstrapContext context) { // visible for testing long getMaxNumberOfThreads() { - return JNANatives.MAX_NUMBER_OF_THREADS; + return NativeAccess.instance().getProcessLimits().maxThreads(); } @Override @@ -378,7 +380,7 @@ static class MaxSizeVirtualMemoryCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { - if (getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity()) { + if (getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != ProcessLimits.UNLIMITED) { final String message = String.format( Locale.ROOT, "max size virtual memory [%d] for user [%s] is too low, increase to [unlimited]", @@ -391,14 +393,9 @@ public BootstrapCheckResult check(BootstrapContext context) { } } - // visible for testing - long getRlimInfinity() { - return JNACLibrary.RLIM_INFINITY; - } - // visible for testing long getMaxSizeVirtualMemory() { - return JNANatives.MAX_SIZE_VIRTUAL_MEMORY; + return NativeAccess.instance().getProcessLimits().maxVirtualMemorySize(); } @Override @@ -415,7 +412,7 @@ static class MaxFileSizeCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { final long maxFileSize = getMaxFileSize(); - if (maxFileSize != Long.MIN_VALUE && maxFileSize != getRlimInfinity()) { + if (maxFileSize != Long.MIN_VALUE && maxFileSize != ProcessLimits.UNLIMITED) { final String message = String.format( Locale.ROOT, "max file size [%d] for user [%s] is too low, increase to [unlimited]", @@ -428,12 +425,8 @@ public BootstrapCheckResult check(BootstrapContext context) { } } - long getRlimInfinity() { - return JNACLibrary.RLIM_INFINITY; - } - long getMaxFileSize() { - return JNANatives.MAX_FILE_SIZE; + return NativeAccess.instance().getProcessLimits().maxFileSize(); } @Override diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 960988db67b33..57f9ef5cb44f3 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -326,10 +326,6 @@ public boolean handle(int code) { // we've already logged this. } - Natives.trySetMaxNumberOfThreads(); - Natives.trySetMaxSizeVirtualMemory(); - Natives.trySetMaxFileSize(); - // init lucene random seed.
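The checks above now consult the `NativeAccess` layer instead of JNA-populated statics, and the magic sentinels (`-1`, `RLIM_INFINITY`) give way to named constants. A hedged sketch of how a caller reads the `ProcessLimits` API as it appears in these hunks (sketch only, not the bootstrap-check code itself):

```java
import org.elasticsearch.nativeaccess.NativeAccess;
import org.elasticsearch.nativeaccess.ProcessLimits;

// Prints the rlimit-backed values the bootstrap checks compare against,
// using the accessors and sentinel constants shown in the diff.
public class LimitsProbe {
    public static void main(String[] args) {
        ProcessLimits limits = NativeAccess.instance().getProcessLimits();
        if (limits.maxThreads() == ProcessLimits.UNKNOWN) {
            System.out.println("max threads could not be determined");
        } else {
            System.out.println("max threads: " + limits.maxThreads());
        }
        System.out.println(
            limits.maxVirtualMemorySize() == ProcessLimits.UNLIMITED
                ? "address space: unlimited"
                : "address space: " + limits.maxVirtualMemorySize()
        );
    }
}
```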
it will use /dev/urandom where available: StringHelper.randomId(); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index 12d008da493b3..b599cb488522e 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -39,13 +39,6 @@ private JNANatives() {} // Set to true, in case policy can be applied to all threads of the process (even existing ones) // otherwise they are only inherited for new threads (ES app threads) static boolean LOCAL_SYSTEM_CALL_FILTER_ALL = false; - // set to the maximum number of threads that can be created for - // the user ID that owns the running Elasticsearch process - static long MAX_NUMBER_OF_THREADS = -1; - - static long MAX_SIZE_VIRTUAL_MEMORY = Long.MIN_VALUE; - - static long MAX_FILE_SIZE = Long.MIN_VALUE; static void tryMlockall() { int errno = Integer.MIN_VALUE; @@ -105,45 +98,6 @@ static void tryMlockall() { } } - static void trySetMaxNumberOfThreads() { - if (Constants.LINUX) { - // this is only valid on Linux and the value *is* different on OS X - // see /usr/include/sys/resource.h on OS X - // on Linux the resource RLIMIT_NPROC means *the number of threads* - // this is in opposition to BSD-derived OSes - final int rlimit_nproc = 6; - - final JNACLibrary.Rlimit rlimit = new JNACLibrary.Rlimit(); - if (JNACLibrary.getrlimit(rlimit_nproc, rlimit) == 0) { - MAX_NUMBER_OF_THREADS = rlimit.rlim_cur.longValue(); - } else { - logger.warn("unable to retrieve max number of threads [" + JNACLibrary.strerror(Native.getLastError()) + "]"); - } - } - } - - static void trySetMaxSizeVirtualMemory() { - if (Constants.LINUX || Constants.MAC_OS_X) { - final JNACLibrary.Rlimit rlimit = new JNACLibrary.Rlimit(); - if (JNACLibrary.getrlimit(JNACLibrary.RLIMIT_AS, rlimit) == 0) { - MAX_SIZE_VIRTUAL_MEMORY = rlimit.rlim_cur.longValue(); - } else { - logger.warn("unable to retrieve max size virtual memory [" + JNACLibrary.strerror(Native.getLastError()) + "]"); - } - } - } - - static void trySetMaxFileSize() { - if (Constants.LINUX || Constants.MAC_OS_X) { - final JNACLibrary.Rlimit rlimit = new JNACLibrary.Rlimit(); - if (JNACLibrary.getrlimit(JNACLibrary.RLIMIT_FSIZE, rlimit) == 0) { - MAX_FILE_SIZE = rlimit.rlim_cur.longValue(); - } else { - logger.warn("unable to retrieve max file size [" + JNACLibrary.strerror(Native.getLastError()) + "]"); - } - } - } - static String rlimitToString(long value) { assert Constants.LINUX || Constants.MAC_OS_X; if (value == JNACLibrary.RLIM_INFINITY) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java index 040c50b2b74e2..01d7932a571e8 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java @@ -104,30 +104,6 @@ static void tryInstallSystemCallFilter(Path tmpFile) { JNANatives.tryInstallSystemCallFilter(tmpFile); } - static void trySetMaxNumberOfThreads() { - if (JNA_AVAILABLE == false) { - logger.warn("cannot getrlimit RLIMIT_NPROC because JNA is not available"); - return; - } - JNANatives.trySetMaxNumberOfThreads(); - } - - static void trySetMaxSizeVirtualMemory() { - if (JNA_AVAILABLE == false) { - logger.warn("cannot getrlimit RLIMIT_AS because JNA is not available"); - return; - } - JNANatives.trySetMaxSizeVirtualMemory(); - } - - static void trySetMaxFileSize() { - if 
(JNA_AVAILABLE == false) { - logger.warn("cannot getrlimit RLIMIT_FSIZE because JNA is not available"); - return; - } - JNANatives.trySetMaxFileSize(); - } - static boolean isSystemCallFilterInstalled() { if (JNA_AVAILABLE == false) { return false; diff --git a/server/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java b/server/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java index a1b8690e7ea66..a50c53ad24b01 100644 --- a/server/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java @@ -103,6 +103,7 @@ protected T registerNewPage(Recycler.V v, int page, int expectedSize) { protected final void releasePage(int page) { if (recycler != null) { + assert cache[page] != null; cache[page].close(); cache[page] = null; } diff --git a/server/src/main/java/org/elasticsearch/common/util/AbstractBigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/AbstractBigByteArray.java index 6ea89ae88b215..3cb14788129eb 100644 --- a/server/src/main/java/org/elasticsearch/common/util/AbstractBigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/AbstractBigByteArray.java @@ -10,21 +10,31 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.recycler.Recycler; +import java.io.IOException; import java.util.Arrays; abstract class AbstractBigByteArray extends AbstractBigArray { + protected static final byte[] ZERO_PAGE = new byte[PageCacheRecycler.BYTE_PAGE_SIZE]; + protected byte[][] pages; protected AbstractBigByteArray(int pageSize, BigArrays bigArrays, boolean clearOnResize, long size) { super(pageSize, bigArrays, clearOnResize); this.size = size; pages = new byte[numPages(size)][]; - for (int i = 0; i < pages.length; ++i) { - pages[i] = newBytePage(i); + Arrays.fill(pages, ZERO_PAGE); + assert assertZeroPageClean(); + } + + private static boolean assertZeroPageClean() { + for (byte b : ZERO_PAGE) { + assert b == 0 : b; } + return true; } /** Change the size of this array. Content between indexes 0 and min(size(), newSize) will be preserved. 
*/ @@ -35,16 +45,17 @@ public void resize(long newSize) { pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF)); } for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) { - pages[i] = newBytePage(i); + pages[i] = ZERO_PAGE; } for (int i = numPages; i < pages.length && pages[i] != null; ++i) { + assert pages[i] != ZERO_PAGE; pages[i] = null; releasePage(i); } this.size = newSize; } - protected final byte[] newBytePage(int page) { + private byte[] newBytePage(int page) { if (recycler != null) { final Recycler.V v = recycler.bytePage(clearOnResize); return registerNewPage(v, page, PageCacheRecycler.BYTE_PAGE_SIZE); @@ -68,22 +79,40 @@ protected static void fillBySelfCopy(byte[] page, int fromBytes, int toBytes, in /** * Bulk copies array to paged array */ - protected void set(long index, byte[] buf, int offset, int len, byte[][] pages, int shift) { + protected void set(long index, byte[] buf, int offset, int len, int shift) { assert index + len <= size(); int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); if (indexInPage + len <= pageSize()) { - System.arraycopy(buf, offset << shift, pages[pageIndex], indexInPage << shift, len << shift); + System.arraycopy(buf, offset << shift, getPageForWriting(pageIndex), indexInPage << shift, len << shift); } else { int copyLen = pageSize() - indexInPage; - System.arraycopy(buf, offset << shift, pages[pageIndex], indexInPage, copyLen << shift); + System.arraycopy(buf, offset << shift, getPageForWriting(pageIndex), indexInPage << shift, copyLen << shift); do { ++pageIndex; offset += copyLen; len -= copyLen; copyLen = Math.min(len, pageSize()); - System.arraycopy(buf, offset << shift, pages[pageIndex], 0, copyLen << shift); + System.arraycopy(buf, offset << shift, getPageForWriting(pageIndex), 0, copyLen << shift); } while (len > copyLen); } } + + protected byte[] getPageForWriting(int pageIndex) { + byte[] foundPage = pages[pageIndex]; + if (foundPage == ZERO_PAGE) { + foundPage = newBytePage(pageIndex); + pages[pageIndex] = foundPage; + } + return foundPage; + } + + protected void readPages(StreamInput in) throws IOException { + int remainedBytes = in.readVInt(); + for (int i = 0; i < pages.length && remainedBytes > 0; i++) { + int len = Math.min(remainedBytes, pages[0].length); + in.readBytes(getPageForWriting(i), 0, len); + remainedBytes -= len; + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index feb5109422f5a..1e8b0cc83eaa6 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -21,9 +21,11 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Streams; import org.elasticsearch.indices.breaker.CircuitBreakerService; import java.io.IOException; +import java.io.InputStream; import java.util.Arrays; import static org.elasticsearch.common.util.BigDoubleArray.VH_PLATFORM_NATIVE_DOUBLE; @@ -162,8 +164,8 @@ public BytesRef next() { } @Override - public void fillWith(StreamInput in) throws IOException { - in.readBytes(array, 0, Math.toIntExact(size())); + public void fillWith(InputStream in) throws IOException { + Streams.readFully(in, array, 0, Math.toIntExact(size())); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java
b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index 78e8d71a60b20..61848769e661d 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -10,10 +10,11 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Streams; import java.io.IOException; +import java.io.InputStream; import java.util.Arrays; import static org.elasticsearch.common.util.BigLongArray.writePages; @@ -49,7 +50,7 @@ public byte get(long index) { public byte set(long index, byte value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); - final byte[] page = pages[pageIndex]; + final byte[] page = getPageForWriting(pageIndex); final byte ret = page[indexInPage]; page[indexInPage] = value; return ret; @@ -90,16 +91,16 @@ public void set(long index, byte[] buf, int offset, int len) { int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); if (indexInPage + len <= pageSize()) { - System.arraycopy(buf, offset, pages[pageIndex], indexInPage, len); + System.arraycopy(buf, offset, getPageForWriting(pageIndex), indexInPage, len); } else { int copyLen = pageSize() - indexInPage; - System.arraycopy(buf, offset, pages[pageIndex], indexInPage, copyLen); + System.arraycopy(buf, offset, getPageForWriting(pageIndex), indexInPage, copyLen); do { ++pageIndex; offset += copyLen; len -= copyLen; copyLen = Math.min(len, pageSize()); - System.arraycopy(buf, offset, pages[pageIndex], 0, copyLen); + System.arraycopy(buf, offset, getPageForWriting(pageIndex), 0, copyLen); } while (len > copyLen); } } @@ -112,13 +113,13 @@ public void fill(long fromIndex, long toIndex, byte value) { final int fromPage = pageIndex(fromIndex); final int toPage = pageIndex(toIndex - 1); if (fromPage == toPage) { - Arrays.fill(pages[fromPage], indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + Arrays.fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); } else { - Arrays.fill(pages[fromPage], indexInPage(fromIndex), pages[fromPage].length, value); + Arrays.fill(getPageForWriting(fromPage), indexInPage(fromIndex), pages[fromPage].length, value); for (int i = fromPage + 1; i < toPage; ++i) { - Arrays.fill(pages[i], value); + Arrays.fill(getPageForWriting(i), value); } - Arrays.fill(pages[toPage], 0, indexInPage(toIndex - 1) + 1, value); + Arrays.fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); } } @@ -153,11 +154,11 @@ public BytesRef next() { } @Override - public void fillWith(StreamInput in) throws IOException { + public void fillWith(InputStream in) throws IOException { for (int i = 0; i < pages.length - 1; i++) { - in.readBytes(pages[i], 0, PAGE_SIZE_IN_BYTES); + Streams.readFully(in, getPageForWriting(i), 0, PAGE_SIZE_IN_BYTES); } - in.readBytes(pages[pages.length - 1], 0, Math.toIntExact(size - (pages.length - 1L) * PAGE_SIZE_IN_BYTES)); + Streams.readFully(in, getPageForWriting(pages.length - 1), 0, Math.toIntExact(size - (pages.length - 1L) * PAGE_SIZE_IN_BYTES)); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java index 37e7b0f480e68..27dc454c85adf 100644 --- 
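The `ZERO_PAGE` changes above make the byte-backed big arrays lazily allocated: every page initially aliases one shared, all-zero page, reads work unchanged, and `getPageForWriting` swaps in a real (possibly recycled) page on first mutation. A self-contained sketch of the idea with a fixed page size and no recycler (hypothetical class, not the Elasticsearch implementation):

```java
import java.util.Arrays;

// Minimal copy-on-write paged byte store: reads of untouched pages hit the
// shared ZERO_PAGE; a private page is only allocated on the first write.
class LazyPagedBytes {
    private static final int PAGE_SIZE = 16 * 1024;
    private static final byte[] ZERO_PAGE = new byte[PAGE_SIZE];
    private final byte[][] pages;

    LazyPagedBytes(int numPages) {
        pages = new byte[numPages][];
        Arrays.fill(pages, ZERO_PAGE); // no per-page allocation yet
    }

    byte get(long index) {
        return pages[(int) (index / PAGE_SIZE)][(int) (index % PAGE_SIZE)];
    }

    void set(long index, byte value) {
        getPageForWriting((int) (index / PAGE_SIZE))[(int) (index % PAGE_SIZE)] = value;
    }

    private byte[] getPageForWriting(int pageIndex) {
        byte[] page = pages[pageIndex];
        if (page == ZERO_PAGE) {
            page = new byte[PAGE_SIZE]; // materialize on first write
            pages[pageIndex] = page;
        }
        return page;
    }
}
```

The `assert pages[i] != ZERO_PAGE` in the resize hunk captures the invariant that makes this safe: the shared page is never handed out for writing, so it stays all-zero.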
a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java @@ -16,7 +16,6 @@ import java.lang.invoke.VarHandle; import java.nio.ByteOrder; -import static org.elasticsearch.common.util.BigLongArray.readPages; import static org.elasticsearch.common.util.BigLongArray.writePages; import static org.elasticsearch.common.util.PageCacheRecycler.DOUBLE_PAGE_SIZE; @@ -46,7 +45,7 @@ public double get(long index) { public double set(long index, double value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); - final byte[] page = pages[pageIndex]; + final byte[] page = getPageForWriting(pageIndex); final double ret = (double) VH_PLATFORM_NATIVE_DOUBLE.get(page, indexInPage << 3); VH_PLATFORM_NATIVE_DOUBLE.set(page, indexInPage << 3, value); return ret; @@ -56,7 +55,7 @@ public double set(long index, double value) { public double increment(long index, double inc) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); - final byte[] page = pages[pageIndex]; + final byte[] page = getPageForWriting(pageIndex); final double newVal = (double) VH_PLATFORM_NATIVE_DOUBLE.get(page, indexInPage << 3) + inc; VH_PLATFORM_NATIVE_DOUBLE.set(page, indexInPage << 3, newVal); return newVal; @@ -75,13 +74,13 @@ public void fill(long fromIndex, long toIndex, double value) { final int fromPage = pageIndex(fromIndex); final int toPage = pageIndex(toIndex - 1); if (fromPage == toPage) { - fill(pages[fromPage], indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); } else { - fill(pages[fromPage], indexInPage(fromIndex), pageSize(), value); + fill(getPageForWriting(fromPage), indexInPage(fromIndex), pageSize(), value); for (int i = fromPage + 1; i < toPage; ++i) { - fill(pages[i], 0, pageSize(), value); + fill(getPageForWriting(i), 0, pageSize(), value); } - fill(pages[toPage], 0, indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); } } @@ -94,7 +93,7 @@ public static void fill(byte[] page, int from, int to, double value) { @Override public void fillWith(StreamInput in) throws IOException { - readPages(in, pages); + readPages(in); } /** Estimates the number of bytes that would be consumed by an array of the given size. 
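`BigDoubleArray` above (like `BigFloatArray`, `BigIntArray`, and `BigLongArray` below) addresses its byte pages through a platform-native `VarHandle` view, which is why indices are shifted into byte offsets (`<< 3` for 8-byte doubles, `<< 2` for ints and floats). A small demo of that access pattern (demo class only, not the patched code):

```java
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.nio.ByteOrder;

// A byte[] page viewed as a double[] page, as the Big*Array classes do.
public class DoublePageDemo {
    private static final VarHandle VH_DOUBLE =
        MethodHandles.byteArrayViewVarHandle(double[].class, ByteOrder.nativeOrder());

    public static void main(String[] args) {
        byte[] page = new byte[4 * Double.BYTES];
        VH_DOUBLE.set(page, 1 << 3, 42.5);                  // slot 1 lives at byte offset 8
        double read = (double) VH_DOUBLE.get(page, 1 << 3); // explicit cast is required
        System.out.println(read);                           // 42.5
    }
}
```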
*/ @@ -104,7 +103,7 @@ public static long estimateRamBytes(final long size) { @Override public void set(long index, byte[] buf, int offset, int len) { - set(index, buf, offset, len, pages, 3); + set(index, buf, offset, len, 3); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java b/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java index 9fc5716c0aaa3..9502950c1d25b 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java @@ -33,7 +33,7 @@ final class BigFloatArray extends AbstractBigByteArray implements FloatArray { public float set(long index, float value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); - final byte[] page = pages[pageIndex]; + final byte[] page = getPageForWriting(pageIndex); final float ret = (float) VH_PLATFORM_NATIVE_FLOAT.get(page, indexInPage << 2); VH_PLATFORM_NATIVE_FLOAT.set(page, indexInPage << 2, value); return ret; @@ -59,13 +59,13 @@ public void fill(long fromIndex, long toIndex, float value) { final int fromPage = pageIndex(fromIndex); final int toPage = pageIndex(toIndex - 1); if (fromPage == toPage) { - fill(pages[fromPage], indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); } else { - fill(pages[fromPage], indexInPage(fromIndex), pageSize(), value); + fill(getPageForWriting(fromPage), indexInPage(fromIndex), pageSize(), value); for (int i = fromPage + 1; i < toPage; ++i) { - fill(pages[i], 0, pageSize(), value); + fill(getPageForWriting(i), 0, pageSize(), value); } - fill(pages[toPage], 0, indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); } } @@ -83,6 +83,6 @@ public static long estimateRamBytes(final long size) { @Override public void set(long index, byte[] buf, int offset, int len) { - set(index, buf, offset, len, pages, 2); + set(index, buf, offset, len, 2); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java index cd21b85515c9d..4388cc2308905 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java @@ -16,7 +16,6 @@ import java.lang.invoke.VarHandle; import java.nio.ByteOrder; -import static org.elasticsearch.common.util.BigLongArray.readPages; import static org.elasticsearch.common.util.BigLongArray.writePages; import static org.elasticsearch.common.util.PageCacheRecycler.INT_PAGE_SIZE; @@ -50,7 +49,7 @@ public int get(long index) { public int set(long index, int value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); - final byte[] page = pages[pageIndex]; + final byte[] page = getPageForWriting(pageIndex); final int ret = (int) VH_PLATFORM_NATIVE_INT.get(page, indexInPage << 2); VH_PLATFORM_NATIVE_INT.set(page, indexInPage << 2, value); return ret; @@ -60,7 +59,7 @@ public int set(long index, int value) { public int increment(long index, int inc) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); - final byte[] page = pages[pageIndex]; + final byte[] page = getPageForWriting(pageIndex); final int newVal = (int) VH_PLATFORM_NATIVE_INT.get(page, indexInPage << 2) + inc; VH_PLATFORM_NATIVE_INT.set(page, indexInPage 
<< 2, newVal); return newVal; @@ -74,19 +73,19 @@ public void fill(long fromIndex, long toIndex, int value) { final int fromPage = pageIndex(fromIndex); final int toPage = pageIndex(toIndex - 1); if (fromPage == toPage) { - fill(pages[fromPage], indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); } else { - fill(pages[fromPage], indexInPage(fromIndex), pageSize(), value); + fill(getPageForWriting(fromPage), indexInPage(fromIndex), pageSize(), value); for (int i = fromPage + 1; i < toPage; ++i) { - fill(pages[i], 0, pageSize(), value); + fill(getPageForWriting(i), 0, pageSize(), value); } - fill(pages[toPage], 0, indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); } } @Override public void fillWith(StreamInput in) throws IOException { - readPages(in, pages); + readPages(in); } public static void fill(byte[] page, int from, int to, int value) { @@ -108,6 +107,6 @@ public static long estimateRamBytes(final long size) { @Override public void set(long index, byte[] buf, int offset, int len) { - set(index, buf, offset, len, pages, 2); + set(index, buf, offset, len, 2); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java index 758aad450a11d..f0ccea26880c4 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java @@ -44,7 +44,7 @@ public long get(long index) { public long set(long index, long value) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); - final byte[] page = pages[pageIndex]; + final byte[] page = getPageForWriting(pageIndex); final long ret = (long) VH_PLATFORM_NATIVE_LONG.get(page, indexInPage << 3); VH_PLATFORM_NATIVE_LONG.set(page, indexInPage << 3, value); return ret; @@ -54,7 +54,7 @@ public long set(long index, long value) { public long increment(long index, long inc) { final int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); - final byte[] page = pages[pageIndex]; + final byte[] page = getPageForWriting(pageIndex); final long newVal = (long) VH_PLATFORM_NATIVE_LONG.get(page, indexInPage << 3) + inc; VH_PLATFORM_NATIVE_LONG.set(page, indexInPage << 3, newVal); return newVal; @@ -76,13 +76,13 @@ public void fill(long fromIndex, long toIndex, long value) { final int fromPage = pageIndex(fromIndex); final int toPage = pageIndex(toIndex - 1); if (fromPage == toPage) { - fill(pages[fromPage], indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); } else { - fill(pages[fromPage], indexInPage(fromIndex), pageSize(), value); + fill(getPageForWriting(fromPage), indexInPage(fromIndex), pageSize(), value); for (int i = fromPage + 1; i < toPage; ++i) { - fill(pages[i], 0, pageSize(), value); + fill(getPageForWriting(i), 0, pageSize(), value); } - fill(pages[toPage], 0, indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); } } @@ -100,7 +100,7 @@ public static long estimateRamBytes(final long size) { @Override public void set(long index, byte[] buf, int offset, int len) { - set(index, buf, offset, len, pages, 3); + set(index, buf, offset, len, 3); } @Override @@ -110,16 +110,7 @@ public void 
writeTo(StreamOutput out) throws IOException { @Override public void fillWith(StreamInput in) throws IOException { - readPages(in, pages); - } - - static void readPages(StreamInput in, byte[][] pages) throws IOException { - int remainedBytes = in.readVInt(); - for (int i = 0; i < pages.length && remainedBytes > 0; i++) { - int len = Math.min(remainedBytes, pages[0].length); - in.readBytes(pages[i], 0, len); - remainedBytes -= len; - } + readPages(in); } static void writePages(StreamOutput out, long size, byte[][] pages, int bytesPerValue) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java index 861aa4f9c7eea..cb2b10632d08b 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; +import java.io.InputStream; import java.nio.ByteBuffer; /** @@ -55,7 +56,7 @@ static ByteArray readFrom(StreamInput in) throws IOException { /** * Fills this ByteArray with bytes from the given input stream */ - void fillWith(StreamInput in) throws IOException; + void fillWith(InputStream in) throws IOException; /** * Returns a BytesRefIterator for this ByteArray. This method allows diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java index abb13b5395333..ce0f5bdfedd40 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import java.io.InputStream; import static org.elasticsearch.common.util.BigArrays.indexIsInt; @@ -96,7 +97,7 @@ public BytesRefIterator iterator() { } @Override - public void fillWith(StreamInput in) { + public void fillWith(InputStream in) { throw new UnsupportedOperationException("read-only ByteArray"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java index 9eec1b10a0635..f3a1551d098a7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java @@ -102,6 +102,10 @@ static List decodeDateRanges(BytesRef encodedRanges) thr return decodeRanges(encodedRanges, RangeType.DATE, BinaryRangeUtil::decodeLong); } + static List decodeIntegerRanges(BytesRef encodedRanges) throws IOException { + return decodeRanges(encodedRanges, RangeType.INTEGER, BinaryRangeUtil::decodeInt); + } + static List decodeRanges( BytesRef encodedRanges, RangeType rangeType, @@ -184,6 +188,14 @@ static byte[] encodeLong(long number) { return encode(number, sign); } + static int decodeInt(byte[] bytes, int offset, int length) { + // We encode integers same as longs but we know + // that during parsing we got actual integers. + // So every decoded long should be inside the range of integers. 
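The comment above is the whole contract of the new `decodeInt`: integer ranges ride on the long encoding, so the method body that follows narrows with `Math.toIntExact`, turning an out-of-range value (i.e. corrupted encoded bytes) into an `ArithmeticException` rather than a silent truncation. A quick demo of the difference (demo values only):

```java
// Math.toIntExact narrows safely: in range it returns the int; out of range
// it throws instead of wrapping the way a plain (int) cast would.
public class ToIntExactDemo {
    public static void main(String[] args) {
        System.out.println(Math.toIntExact(42L));           // 42
        System.out.println((int) (Integer.MAX_VALUE + 1L)); // -2147483648 (silent wrap)
        try {
            Math.toIntExact(Integer.MAX_VALUE + 1L);
        } catch (ArithmeticException e) {
            System.out.println("overflow detected: " + e.getMessage());
        }
    }
}
```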
+ long longValue = decodeLong(bytes, offset, length); + return Math.toIntExact(longValue); + } + static long decodeLong(byte[] bytes, int offset, int length) { boolean isNegative = (bytes[offset] & 128) == 0; // Start by masking off the last three bits of the first byte - that's the start of our number diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java index b9230c835cb59..81fd26f4cda52 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java @@ -17,6 +17,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -351,15 +352,9 @@ && matchPatternsAreDefined(match, pathMatch, unmatchMappingType)) .toArray(XContentFieldType[]::new); final MatchType matchType = MatchType.fromString(matchPattern); - List allPatterns = Stream.of(match.stream(), unmatch.stream(), pathMatch.stream(), pathUnmatch.stream()) - .flatMap(s -> s) - .toList(); - for (String pattern : allPatterns) { - // no need to check return value - the method impls either have side effects (set header warnings) - // or throw an exception that should be sent back to the user - matchType.validate(pattern, name); - } - + // no need to check return value - the method impls either have side effects (set header warnings) + // or throw an exception that should be sent back to the user + Stream.of(match, unmatch, pathMatch, pathUnmatch).flatMap(Collection::stream).forEach(pattern -> matchType.validate(pattern, name)); return new DynamicTemplate( name, pathMatch, @@ -427,13 +422,13 @@ private DynamicTemplate( boolean runtimeMapping ) { this.name = name; - this.pathMatch = pathMatch; - this.pathUnmatch = pathUnmatch; - this.match = match; - this.unmatch = unmatch; + this.pathMatch = List.copyOf(pathMatch); + this.pathUnmatch = List.copyOf(pathUnmatch); + this.match = List.copyOf(match); + this.unmatch = List.copyOf(unmatch); this.matchType = matchType; - this.matchMappingType = matchMappingType; - this.unmatchMappingType = unmatchMappingType; + this.matchMappingType = List.copyOf(matchMappingType); + this.unmatchMappingType = List.copyOf(unmatchMappingType); this.xContentFieldTypes = xContentFieldTypes; this.mapping = mapping; this.runtimeMapping = runtimeMapping; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index 04675bb770df8..4bd78ede7015c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -558,21 +558,42 @@ public Object getTo() { public XContentBuilder toXContent(XContentBuilder builder, DateFormatter dateFormatter) throws IOException { builder.startObject(); - if (includeFrom) { - builder.field("gte"); + // Default range bounds for double and float ranges + // are infinities which are not valid inputs for range field. + // As such it is not possible to specify them manually, + // and they must come from defaults kicking in + // when the bound is null or not present. + // Therefore, range should be represented in that way in source too + // to enable reindexing. + // + // We apply this logic to all range types for consistency. 
+ if (from.equals(type.minValue())) { + assert includeFrom : "Range bounds were not properly adjusted during parsing"; + // Null value which will be parsed as a default + builder.nullField("gte"); } else { - builder.field("gt"); + if (includeFrom) { + builder.field("gte"); + } else { + builder.field("gt"); + } + var valueWithAdjustment = includeFrom ? from : type.nextDown(from); + builder.value(type.formatValue(valueWithAdjustment, dateFormatter)); } - Object f = includeFrom || from.equals(type.minValue()) ? from : type.nextDown(from); - builder.value(type.formatValue(f, dateFormatter)); - if (includeTo) { - builder.field("lte"); + if (to.equals(type.maxValue())) { + assert includeTo : "Range bounds were not properly adjusted during parsing"; + // Null value which will be parsed as a default + builder.nullField("lte"); } else { - builder.field("lt"); + if (includeTo) { + builder.field("lte"); + } else { + builder.field("lt"); + } + var valueWithAdjustment = includeTo ? to : type.nextUp(to); + builder.value(type.formatValue(valueWithAdjustment, dateFormatter)); } - Object t = includeTo || to.equals(type.maxValue()) ? to : type.nextUp(to); - builder.value(type.formatValue(t, dateFormatter)); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java index 10818632f3ceb..bd307445c9717 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java @@ -570,12 +570,13 @@ public BytesRef encodeRanges(Set ranges) throws IOExcept @Override public List decodeRanges(BytesRef bytes) throws IOException { - return LONG.decodeRanges(bytes); + return BinaryRangeUtil.decodeIntegerRanges(bytes); } @Override public Double doubleValue(Object endpointValue) { - return LONG.doubleValue(endpointValue); + assert endpointValue instanceof Integer; + return ((Integer) endpointValue).doubleValue(); } @Override diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestStats.java b/server/src/main/java/org/elasticsearch/ingest/IngestStats.java index d58bf6fad4eed..ab39bdaf7b9f5 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestStats.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestStats.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.TimeValue; @@ -285,10 +286,16 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (bytesIngested > 0 || bytesProduced > 0) { - builder.field("ingested_in_bytes", bytesIngested); - builder.field("produced_in_bytes", bytesProduced); - } + builder.humanReadableField( + "ingested_as_first_pipeline_in_bytes", + "ingested_as_first_pipeline", + ByteSizeValue.ofBytes(bytesIngested) + ); + builder.humanReadableField( + "produced_as_first_pipeline_in_bytes", + "produced_as_first_pipeline", + ByteSizeValue.ofBytes(bytesProduced) + ); return builder; } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 
f7a2a605a18bd..e0b2aa61d9c1c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -144,111 +145,111 @@ public RepositoriesService( /** * Registers new repository in the cluster *

- * This method can be only called on the master node. It tries to create a new repository on the master - * and if it was successful it adds new repository to cluster metadata. + * This method can be only called on the master node. + * It tries to create a new repository on the master, and if it was successful, it adds a new repository to cluster metadata. * * @param request register repository request - * @param listener register repository listener + * @param responseListener register repository listener */ - public void registerRepository(final PutRepositoryRequest request, final ActionListener listener) { + public void registerRepository(final PutRepositoryRequest request, final ActionListener responseListener) { assert lifecycle.started() : "Trying to register new repository but service is in state [" + lifecycle.state() + "]"; validateRepositoryName(request.name()); - // Trying to create the new repository on master to make sure it works - try { - validateRepositoryCanBeCreated(request); - } catch (Exception e) { - listener.onFailure(e); - return; - } + // Aggregated result of two asynchronous operations when the cluster acknowledged and state changed + record RegisterRepositoryTaskResult(AcknowledgedResponse ackResponse, boolean changed) {} - final ListenableFuture acknowledgementStep = new ListenableFuture<>(); - final ListenableFuture publicationStep = new ListenableFuture<>(); // Boolean==changed. + SubscribableListener - if (request.verify()) { + // Trying to create the new repository on master to make sure it works + .newForked(validationStep -> ActionListener.completeWith(validationStep, () -> { + validateRepositoryCanBeCreated(request); + return null; + })) // When publication has completed (and all acks received or timed out) then verify the repository. // (if acks timed out then acknowledgementStep completes before the master processes this cluster state, hence why we have // to wait for the publication to be complete too) - final ListenableFuture> verifyStep = new ListenableFuture<>(); - publicationStep.addListener( - listener.delegateFailureAndWrap( - (delegate, changed) -> acknowledgementStep.addListener( - delegate.delegateFailureAndWrap((l, clusterStateUpdateResponse) -> { - if (clusterStateUpdateResponse.isAcknowledged() && changed) { - // The response was acknowledged - all nodes should know about the new repository, let's verify them - verifyRepository(request.name(), verifyStep); - } else { - verifyStep.onResponse(null); + .andThen((clusterUpdateStep, ignored) -> { + final ListenableFuture acknowledgementStep = new ListenableFuture<>(); + final ListenableFuture publicationStep = new ListenableFuture<>(); // Boolean==changed. 
+ submitUnbatchedTask( + "put_repository [" + request.name() + "]", + new RegisterRepositoryTask(this, request, acknowledgementStep) { + @Override + public void onFailure(Exception e) { + logger.warn(() -> "failed to create repository [" + request.name() + "]", e); + publicationStep.onFailure(e); + super.onFailure(e); + } + + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + // repository is created on both master and data nodes + return discoveryNode.isMasterNode() || discoveryNode.canContainData(); + } + + @Override + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { + if (changed) { + if (found) { + logger.info("updated repository [{}]", request.name()); + } else { + logger.info("put repository [{}]", request.name()); + } } - }) + publicationStep.onResponse(oldState != newState); + } + } + ); + publicationStep.addListener( + clusterUpdateStep.delegateFailureAndWrap( + (stateChangeListener, changed) -> acknowledgementStep.addListener( + stateChangeListener.map(acknowledgedResponse -> new RegisterRepositoryTaskResult(acknowledgedResponse, changed)) + ) ) - ) - ); + ); + }) + .andThen((verificationStep, taskResult) -> { + if (request.verify() == false) { + verificationStep.onResponse(taskResult.ackResponse); + } else { + SubscribableListener - // When verification has completed, get the repository data for the first time - final ListenableFuture getRepositoryDataStep = new ListenableFuture<>(); - verifyStep.addListener( - listener.delegateFailureAndWrap( - (l, ignored) -> threadPool.generic() - .execute( - ActionRunnable.wrap( - getRepositoryDataStep, - ll -> repository(request.name()).getRepositoryData( - EsExecutors.DIRECT_EXECUTOR_SERVICE, // TODO contemplate threading, do we need to fork, see #101445? - ll + .>newForked(verifyRepositoryStep -> { + if (taskResult.ackResponse.isAcknowledged() && taskResult.changed) { + verifyRepository(request.name(), verifyRepositoryStep); + } else { + verifyRepositoryStep.onResponse(null); + } + }) + // When verification has completed, get the repository data for the first time + .andThen( + (getRepositoryDataStep, ignored) -> threadPool.generic() + .execute( + ActionRunnable.wrap( + getRepositoryDataStep, + ll -> repository(request.name()).getRepositoryData( + // TODO contemplate threading, do we need to fork, see #101445? 
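The `registerRepository` rewrite above replaces a web of manually wired `ListenableFuture` steps with one `SubscribableListener` chain. A toy sketch of the control flow, using the same `newForked`/`andThen`/`andThenApply`/`addListener` combinators as the hunk (the step payloads here are placeholders, not the repository types):

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.SubscribableListener;

// Each andThen step runs only after the previous step succeeds; any failure
// anywhere in the chain short-circuits straight to the terminal listener.
public class ChainSketch {
    static void run(ActionListener<String> done) {
        SubscribableListener
            // step 1: synchronous validation; throwing here fails the chain
            .<Void>newForked(step -> ActionListener.completeWith(step, () -> null))
            // step 2: an asynchronous step producing an intermediate result
            .<Integer>andThen((step, ignored) -> step.onResponse(42))
            // step 3: a pure transformation of the previous result
            .andThenApply(answer -> "result=" + answer)
            // single exit point for success and failure alike
            .addListener(done);
    }
}
```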
+ EsExecutors.DIRECT_EXECUTOR_SERVICE, + ll + ) + ) ) + ) + // When the repository metadata is ready, update the repository UUID stored in the cluster state, if available + .andThen( + (updateRepoUuidStep, repositoryData) -> updateRepositoryUuidInMetadata( + clusterService, + request.name(), + repositoryData, + updateRepoUuidStep ) ) - ) - ); - - // When the repository metadata is ready, update the repository UUID stored in the cluster state, if available - final ListenableFuture updateRepoUuidStep = new ListenableFuture<>(); - getRepositoryDataStep.addListener( - listener.delegateFailureAndWrap( - (l, repositoryData) -> updateRepositoryUuidInMetadata( - clusterService, - request.name(), - repositoryData, - updateRepoUuidStep - ) - ) - ); - - // Finally respond to the outer listener with the response from the original cluster state update - updateRepoUuidStep.addListener(listener.delegateFailureAndWrap((l, ignored) -> acknowledgementStep.addListener(l))); - - } else { - acknowledgementStep.addListener(listener); - } - - submitUnbatchedTask("put_repository [" + request.name() + "]", new RegisterRepositoryTask(this, request, acknowledgementStep) { - @Override - public void onFailure(Exception e) { - logger.warn(() -> "failed to create repository [" + request.name() + "]", e); - publicationStep.onFailure(e); - super.onFailure(e); - } - - @Override - public boolean mustAck(DiscoveryNode discoveryNode) { - // repository is created on both master and data nodes - return discoveryNode.isMasterNode() || discoveryNode.canContainData(); - } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - if (changed) { - if (found) { - logger.info("updated repository [{}]", request.name()); - } else { - logger.info("put repository [{}]", request.name()); - } + .andThenApply(uuidUpdated -> taskResult.ackResponse) + .addListener(verificationStep); } - publicationStep.onResponse(oldState != newState); - } - }); + }) + .addListener(responseListener); } /** diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index f4067b7eb7560..9bacf19a9169d 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -9,7 +9,9 @@ package org.elasticsearch.search; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.OrdinalMap; +import org.apache.lucene.index.PointValues; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -17,6 +19,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TotalHits; +import org.apache.lucene.util.NumericUtils; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.routing.IndexRouting; @@ -32,6 +35,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.mapper.IdLoader; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -245,12 +249,34 @@ static long 
getFieldCardinality(IndexFieldData indexFieldData, DirectoryReade if (ordinalMap != null) { return ordinalMap.getValueCount(); } - if (directoryReader.leaves().size() == 0) { + if (directoryReader.leaves().isEmpty()) { return 0; } return global.load(directoryReader.leaves().get(0)).getOrdinalsValues().getValueCount(); } - } + } else if (indexFieldData instanceof IndexNumericFieldData indexNumericFieldData) { + final IndexNumericFieldData.NumericType type = indexNumericFieldData.getNumericType(); + try { + if (type == IndexNumericFieldData.NumericType.INT || type == IndexNumericFieldData.NumericType.SHORT) { + final IndexReader reader = directoryReader.getContext().reader(); + final byte[] min = PointValues.getMinPackedValue(reader, indexFieldData.getFieldName()); + final byte[] max = PointValues.getMaxPackedValue(reader, indexFieldData.getFieldName()); + if (min != null && max != null) { + return NumericUtils.sortableBytesToInt(max, 0) - NumericUtils.sortableBytesToInt(min, 0) + 1; + } + } else if (type == IndexNumericFieldData.NumericType.LONG) { + final IndexReader reader = directoryReader.getContext().reader(); + final byte[] min = PointValues.getMinPackedValue(reader, indexFieldData.getFieldName()); + final byte[] max = PointValues.getMaxPackedValue(reader, indexFieldData.getFieldName()); + if (min != null && max != null) { + return NumericUtils.sortableBytesToLong(max, 0) - NumericUtils.sortableBytesToLong(min, 0) + 1; + } + } + } catch (IOException ioe) { + return -1L; + } + } return -1L; } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/RequestDispatcherTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/RequestDispatcherTests.java index 75d5d7fb7c55d..f5f35c52044d7 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/RequestDispatcherTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/RequestDispatcherTests.java @@ -56,6 +56,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; @@ -599,6 +600,63 @@ public void sendRequest( } } + public void testFailWithSameException() throws Exception { + final List allIndices = IntStream.rangeClosed(1, 5).mapToObj(n -> "index_" + n).toList(); + final ClusterState clusterState; + { + DiscoveryNodes.Builder discoNodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 10); + for (int i = 0; i < numNodes; i++) { + discoNodes.add(newNode("node_" + i, VersionUtils.randomVersion(random()), IndexVersionUtils.randomVersion())); + } + Metadata.Builder metadata = Metadata.builder(); + for (String index : allIndices) { + metadata.put( + IndexMetadata.builder(index).settings(indexSettings(IndexVersions.MINIMUM_COMPATIBLE, between(1, 10), between(0, 3))) + ); + } + clusterState = newClusterState(metadata.build(), discoNodes.build()); + } + try (TestTransportService transportService = TestTransportService.newTestTransportService()) { + final List targetIndices = randomSubsetOf(between(1, allIndices.size()), allIndices); + final ResponseCollector responseCollector = new ResponseCollector(); + boolean withFilter = randomBoolean(); + final RequestDispatcher dispatcher = new RequestDispatcher( + mockClusterService(clusterState), + transportService,
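The numeric branch added to `getFieldCardinality` above exploits the BKD points index: for whole-number fields, `max - min + 1` over the decoded min/max packed values is a cheap upper bound on the number of distinct values. A sketch of the decode step (demo values; `NumericUtils` is the Lucene class imported in the hunk):

```java
import org.apache.lucene.util.NumericUtils;

// Points store ints in a sortable byte encoding; decoding the min/max packed
// values yields the cardinality upper bound used by the patch: max - min + 1.
public class PointCardinalityDemo {
    public static void main(String[] args) {
        byte[] min = new byte[Integer.BYTES];
        byte[] max = new byte[Integer.BYTES];
        NumericUtils.intToSortableBytes(10, min, 0);
        NumericUtils.intToSortableBytes(25, max, 0);
        long upperBound = NumericUtils.sortableBytesToInt(max, 0)
            - NumericUtils.sortableBytesToInt(min, 0) + 1;
        System.out.println(upperBound); // 16 possible distinct values in [10, 25]
    }
}
```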
newRandomParentTask(),
+                randomFieldCapRequest(withFilter),
+                OriginalIndices.NONE,
+                randomNonNegativeLong(),
+                targetIndices.toArray(new String[0]),
+                transportService.threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION),
+                responseCollector::addIndexResponse,
+                responseCollector::addIndexFailure,
+                responseCollector::onComplete
+            );
+            final RequestTracker requestTracker = new RequestTracker(dispatcher, clusterState.routingTable(), withFilter);
+            transportService.requestTracker.set(requestTracker);
+
+            RuntimeException ex = new RuntimeException("shared");
+            transportService.setTransportInterceptor(new TransportInterceptor.AsyncSender() {
+                @Override
+                public <T extends TransportResponse> void sendRequest(
+                    Transport.Connection connection,
+                    String action,
+                    TransportRequest request,
+                    TransportRequestOptions options,
+                    TransportResponseHandler<T> handler
+                ) {
+                    Exception failure = randomFrom(ex, new RuntimeException("second"), new IllegalStateException("third"));
+                    handler.executor().execute(() -> handler.handleException(new TransportException(failure)));
+                }
+            });
+            dispatcher.execute();
+            responseCollector.awaitCompletion();
+            assertThat(responseCollector.failures.keySet(), equalTo(Sets.newHashSet(targetIndices)));
+        }
+    }
+
     private static class NodeRequest {
         final int round;
         final DiscoveryNode node;
diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java
index 7b7061c0e1bc6..60e334704f1fa 100644
--- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java
@@ -15,6 +15,7 @@
 import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.breaker.NoopCircuitBreaker;
 import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.DocValueFormat;
@@ -38,6 +39,7 @@
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiFunction;
 
 import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.equalTo;
@@ -105,12 +107,7 @@ public void testShortcutQueryAndFetchOptimization() throws Exception {
             null,
             mockSearchPhaseContext,
             reducedQueryPhase,
-            (searchResponse, scrollId) -> new SearchPhase("test") {
-                @Override
-                public void run() {
-                    mockSearchPhaseContext.sendSearchResponse(searchResponse, null);
-                }
-            }
+            searchPhaseFactory(mockSearchPhaseContext)
         );
         assertEquals("fetch", phase.getName());
         phase.run();
@@ -238,12 +235,7 @@ public void sendExecuteFetch(
             null,
             mockSearchPhaseContext,
             reducedQueryPhase,
-            (searchResponse, scrollId) -> new SearchPhase("test") {
-                @Override
-                public void run() {
-                    mockSearchPhaseContext.sendSearchResponse(searchResponse, null);
-                }
-            }
+            searchPhaseFactory(mockSearchPhaseContext)
         );
         assertEquals("fetch", phase.getName());
         phase.run();
@@ -352,12 +344,7 @@ public void sendExecuteFetch(
             null,
             mockSearchPhaseContext,
             reducedQueryPhase,
-            (searchResponse, scrollId) -> new SearchPhase("test") {
-                @Override
-                public void run() {
-                    mockSearchPhaseContext.sendSearchResponse(searchResponse, null);
-                }
-            }
+            searchPhaseFactory(mockSearchPhaseContext)
        );
         assertEquals("fetch", phase.getName());
         phase.run();
@@ -583,24 +570,16 @@ public void sendExecuteFetch(
                     listener.onFailure(new RuntimeException("BOOM"));
                     return;
                 }
-                SearchHits hits;
-                if (request.contextId().getId() == 321) {
-                    fetchResult.setSearchShardTarget(shard2Target);
-                    hits = SearchHits.unpooled(
+                assertEquals(321, request.contextId().getId());
+                fetchResult.setSearchShardTarget(shard2Target);
+                fetchResult.shardResult(
+                    SearchHits.unpooled(
                         new SearchHit[] { SearchHit.unpooled(84) },
                         new TotalHits(1, TotalHits.Relation.EQUAL_TO),
                         2.0F
-                    );
-                } else {
-                    fetchResult.setSearchShardTarget(shard1Target);
-                    assertEquals(request, 123);
-                    hits = SearchHits.unpooled(
-                        new SearchHit[] { SearchHit.unpooled(42) },
-                        new TotalHits(1, TotalHits.Relation.EQUAL_TO),
-                        1.0F
-                    );
-                }
-                fetchResult.shardResult(hits, fetchProfile(profiled));
+                    ),
+                    fetchProfile(profiled)
+                );
                 listener.onResponse(fetchResult);
             } finally {
                 fetchResult.decRef();
@@ -613,12 +592,7 @@ public void sendExecuteFetch(
             null,
             mockSearchPhaseContext,
             reducedQueryPhase,
-            (searchResponse, scrollId) -> new SearchPhase("test") {
-                @Override
-                public void run() {
-                    mockSearchPhaseContext.sendSearchResponse(searchResponse, null);
-                }
-            }
+            searchPhaseFactory(mockSearchPhaseContext)
         );
         assertEquals("fetch", phase.getName());
         phase.run();
@@ -720,12 +694,7 @@ public void sendExecuteFetch(
             null,
             mockSearchPhaseContext,
             reducedQueryPhase,
-            (searchResponse, scrollId) -> new SearchPhase("test") {
-                @Override
-                public void run() {
-                    mockSearchPhaseContext.sendSearchResponse(searchResponse, null);
-                }
-            }
+            searchPhaseFactory(mockSearchPhaseContext)
         );
         assertEquals("fetch", phase.getName());
         phase.run();
@@ -756,13 +725,24 @@ public void run() {
     }
 
-    private void addProfiling(boolean profiled, QuerySearchResult queryResult) {
+    private static BiFunction<SearchResponseSections, AtomicArray<SearchPhaseResult>, SearchPhase> searchPhaseFactory(
+        MockSearchPhaseContext mockSearchPhaseContext
+    ) {
+        return (searchResponse, scrollId) -> new SearchPhase("test") {
+            @Override
+            public void run() {
+                mockSearchPhaseContext.sendSearchResponse(searchResponse, null);
+            }
+        };
+    }
+
+    private static void addProfiling(boolean profiled, QuerySearchResult queryResult) {
         if (profiled) {
             queryResult.profileResults(new SearchProfileQueryPhaseResult(List.of(), null));
         }
     }
 
-    private ProfileResult fetchProfile(boolean profiled) {
+    private static ProfileResult fetchProfile(boolean profiled) {
         return profiled ? new ProfileResult("fetch", "fetch", Map.of(), Map.of(), FETCH_PROFILE_TIME, List.of()) : null;
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
index 6d24f8d2fe9e0..7f0cf4973b5f5 100644
--- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
@@ -60,7 +60,7 @@
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.CapturingTransport;
 import org.elasticsearch.threadpool.TestThreadPool;
@@ -495,10 +495,10 @@ public void testMasterBecomesAvailable() throws ExecutionException, InterruptedE
         request.decRef();
         assertTrue(request.hasReferences());
 
-        MockLogAppender.assertThatLogger(
+        MockLog.assertThatLogger(
             () -> setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)),
             ClusterApplierService.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "listener log",
                 ClusterApplierService.class.getCanonicalName(),
                 Level.TRACE,
diff --git a/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java
index 09ef0b6affc23..a6c5c9a67a387 100644
--- a/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java
+++ b/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java
@@ -9,7 +9,6 @@
 package org.elasticsearch.bootstrap;
 
 import org.apache.logging.log4j.Logger;
-import org.apache.lucene.util.Constants;
 import org.elasticsearch.cluster.coordination.ClusterBootstrapService;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.common.ReferenceDocs;
@@ -20,6 +19,7 @@
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider;
 import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.nativeaccess.ProcessLimits;
 import org.elasticsearch.node.NodeValidationException;
 import org.elasticsearch.test.AbstractBootstrapCheckTestCase;
 import org.hamcrest.Matcher;
@@ -355,23 +355,17 @@ long getMaxNumberOfThreads() {
 
         // nothing should happen if current max number of threads is
         // not available
-        maxNumberOfThreads.set(-1);
+        maxNumberOfThreads.set(ProcessLimits.UNKNOWN);
         BootstrapChecks.check(emptyContext, true, Collections.singletonList(check));
     }
 
     public void testMaxSizeVirtualMemory() throws NodeValidationException {
-        final long rlimInfinity = Constants.MAC_OS_X ? 9223372036854775807L : -1L;
         final AtomicLong maxSizeVirtualMemory = new AtomicLong(randomIntBetween(0, Integer.MAX_VALUE));
         final BootstrapChecks.MaxSizeVirtualMemoryCheck check = new BootstrapChecks.MaxSizeVirtualMemoryCheck() {
             @Override
             long getMaxSizeVirtualMemory() {
                 return maxSizeVirtualMemory.get();
             }
-
-            @Override
-            long getRlimInfinity() {
-                return rlimInfinity;
-            }
         };
 
         final NodeValidationException e = expectThrows(
@@ -381,7 +375,7 @@ long getRlimInfinity() {
         assertThat(e.getMessage(), containsString("max size virtual memory"));
         assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/"));
 
-        maxSizeVirtualMemory.set(rlimInfinity);
+        maxSizeVirtualMemory.set(ProcessLimits.UNLIMITED);
 
         BootstrapChecks.check(emptyContext, true, Collections.singletonList(check));
 
@@ -391,18 +385,12 @@ long getRlimInfinity() {
     }
 
     public void testMaxFileSizeCheck() throws NodeValidationException {
-        final long rlimInfinity = Constants.MAC_OS_X ? 9223372036854775807L : -1L;
         final AtomicLong maxFileSize = new AtomicLong(randomIntBetween(0, Integer.MAX_VALUE));
         final BootstrapChecks.MaxFileSizeCheck check = new BootstrapChecks.MaxFileSizeCheck() {
             @Override
             long getMaxFileSize() {
                 return maxFileSize.get();
             }
-
-            @Override
-            long getRlimInfinity() {
-                return rlimInfinity;
-            }
         };
 
         final NodeValidationException e = expectThrows(
@@ -412,7 +400,7 @@ long getRlimInfinity() {
         assertThat(e.getMessage(), containsString("max file size"));
         assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/"));
 
-        maxFileSize.set(rlimInfinity);
+        maxFileSize.set(ProcessLimits.UNLIMITED);
 
         BootstrapChecks.check(emptyContext, true, Collections.singletonList(check));
diff --git a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
index 3a8f95b868e35..21ea7f12ca601 100644
--- a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
+++ b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
@@ -18,7 +18,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.PathUtils;
 import org.elasticsearch.test.AbstractBootstrapCheckTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 
 import java.io.BufferedReader;
 import java.io.IOException;
@@ -131,9 +131,8 @@ BufferedReader getBufferedReader(Path path) throws IOException {
             final IOException ioException = new IOException("fatal");
             when(reader.readLine()).thenThrow(ioException);
             final Logger logger = LogManager.getLogger("testGetMaxMapCountIOException");
-            final MockLogAppender appender = new MockLogAppender();
-            try (var ignored = appender.capturing("testGetMaxMapCountIOException")) {
-                appender.addExpectation(
+            try (var mockLog = MockLog.capture("testGetMaxMapCountIOException")) {
+                mockLog.addExpectation(
                     new MessageLoggingExpectation(
                         "expected logged I/O exception",
                         "testGetMaxMapCountIOException",
@@ -143,7 +142,7 @@ BufferedReader getBufferedReader(Path path) throws IOException {
                     )
                 );
                 assertThat(check.getMaxMapCount(logger), equalTo(-1L));
-                appender.assertAllExpectationsMatched();
+                mockLog.assertAllExpectationsMatched();
             }
             verify(reader).close();
         }
@@ -152,9 +151,8 @@ BufferedReader getBufferedReader(Path path) throws IOException {
             reset(reader);
             when(reader.readLine()).thenReturn("eof");
             final Logger logger = LogManager.getLogger("testGetMaxMapCountNumberFormatException");
-            final MockLogAppender appender = new MockLogAppender();
-            try (var ignored = appender.capturing("testGetMaxMapCountNumberFormatException")) {
-                appender.addExpectation(
+            try (var mockLog = MockLog.capture("testGetMaxMapCountNumberFormatException")) {
+                mockLog.addExpectation(
                     new MessageLoggingExpectation(
                         "expected logged number format exception",
                         "testGetMaxMapCountNumberFormatException",
@@ -164,14 +162,14 @@ BufferedReader getBufferedReader(Path path) throws IOException {
                     )
                 );
                 assertThat(check.getMaxMapCount(logger), equalTo(-1L));
-                appender.assertAllExpectationsMatched();
+                mockLog.assertAllExpectationsMatched();
             }
             verify(reader).close();
         }
 
     }
 
-    private static class MessageLoggingExpectation implements MockLogAppender.LoggingExpectation {
+    private static class MessageLoggingExpectation implements MockLog.LoggingExpectation {
 
         private boolean saw = false;
diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java
index 954dc0e9eb709..910c10f6b265a 100644
--- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java
@@ -31,7 +31,7 @@
 import org.elasticsearch.core.RefCounted;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -353,19 +353,19 @@ public void testDebugLogging() {
         for (DiscoveryNode disconnectedNode : disconnectedNodes) {
             transportService.disconnectFromNode(disconnectedNode);
         }
-        try (var appender = MockLogAppender.capture(NodeConnectionsService.class)) {
+        try (var mockLog = MockLog.capture(NodeConnectionsService.class)) {
             for (DiscoveryNode targetNode : targetNodes) {
                 if (disconnectedNodes.contains(targetNode)) {
-                    appender.addExpectation(
-                        new MockLogAppender.SeenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.SeenEventExpectation(
                             "connecting to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
                             "connecting to " + targetNode
                         )
                     );
-                    appender.addExpectation(
-                        new MockLogAppender.SeenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.SeenEventExpectation(
                             "connected to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
@@ -373,16 +373,16 @@ public void testDebugLogging() {
                         )
                     );
                 } else {
-                    appender.addExpectation(
-                        new MockLogAppender.UnseenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.UnseenEventExpectation(
                             "connecting to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
                             "connecting to " + targetNode
                         )
                    );
-                    appender.addExpectation(
-                        new MockLogAppender.UnseenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.UnseenEventExpectation(
                             "connected to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
@@ -393,7 +393,7 @@ public void testDebugLogging() {
             }
 
             runTasksUntil(deterministicTaskQueue, CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(Settings.EMPTY).millis());
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
 
         for (DiscoveryNode disconnectedNode : disconnectedNodes) {
@@ -406,19 +406,19 @@ public void testDebugLogging() {
             transportService.disconnectFromNode(disconnectedNode);
         }
 
-        try (var appender = MockLogAppender.capture(NodeConnectionsService.class)) {
+        try (var mockLog = MockLog.capture(NodeConnectionsService.class)) {
             for (DiscoveryNode targetNode : targetNodes) {
                 if (disconnectedNodes.contains(targetNode) && newTargetNodes.get(targetNode.getId()) != null) {
-                    appender.addExpectation(
-                        new MockLogAppender.SeenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.SeenEventExpectation(
                             "connecting to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
                             "connecting to " + targetNode
                         )
                     );
-                    appender.addExpectation(
-                        new MockLogAppender.SeenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.SeenEventExpectation(
                             "connected to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
@@ -426,16 +426,16 @@ public void testDebugLogging() {
                         )
                     );
                 } else {
-                    appender.addExpectation(
-                        new MockLogAppender.UnseenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.UnseenEventExpectation(
                             "connecting to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
                             "connecting to " + targetNode
                         )
                     );
-                    appender.addExpectation(
-                        new MockLogAppender.UnseenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.UnseenEventExpectation(
                             "connected to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
@@ -444,8 +444,8 @@ public void testDebugLogging() {
                     );
                 }
                 if (newTargetNodes.get(targetNode.getId()) == null) {
-                    appender.addExpectation(
-                        new MockLogAppender.SeenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.SeenEventExpectation(
                             "disconnected from " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
@@ -455,8 +455,8 @@ public void testDebugLogging() {
                 }
             }
             for (DiscoveryNode targetNode : newTargetNodes) {
-                appender.addExpectation(
-                    new MockLogAppender.UnseenEventExpectation(
+                mockLog.addExpectation(
+                    new MockLog.UnseenEventExpectation(
                         "disconnected from " + targetNode,
                         "org.elasticsearch.cluster.NodeConnectionsService",
                         Level.DEBUG,
@@ -464,16 +464,16 @@ public void testDebugLogging() {
                     )
                 );
                 if (targetNodes.get(targetNode.getId()) == null) {
-                    appender.addExpectation(
-                        new MockLogAppender.SeenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.SeenEventExpectation(
                             "connecting to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
                             "connecting to " + targetNode
                         )
                     );
-                    appender.addExpectation(
-                        new MockLogAppender.SeenEventExpectation(
+                    mockLog.addExpectation(
+                        new MockLog.SeenEventExpectation(
                             "connected to " + targetNode,
                             "org.elasticsearch.cluster.NodeConnectionsService",
                             Level.DEBUG,
@@ -486,7 +486,7 @@ public void testDebugLogging() {
             service.disconnectFromNodesExcept(newTargetNodes);
             service.connectToNodes(newTargetNodes, () -> {});
             deterministicTaskQueue.runAllRunnableTasks();
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/AtomicRegisterCoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/AtomicRegisterCoordinatorTests.java
index da80a83f6ba19..734544bfb8d71 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/AtomicRegisterCoordinatorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/AtomicRegisterCoordinatorTests.java
@@ -26,7 +26,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.gateway.ClusterStateUpdaters;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 
@@ -143,11 +143,11 @@ public void testWarnLoggingOnRegisterFailures() {
         cluster.stabilise();
 
         final var clusterNode = cluster.getAnyLeader();
-        try (var mockAppender = MockLogAppender.capture(Coordinator.class, Coordinator.CoordinatorPublication.class)) {
+        try (var mockLog = MockLog.capture(Coordinator.class, Coordinator.CoordinatorPublication.class)) {
             clusterNode.disconnect();
 
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "write heartbeat failure",
                     Coordinator.class.getCanonicalName(),
                     Level.WARN,
@@ -155,12 +155,12 @@ public void testWarnLoggingOnRegisterFailures() {
                 )
             );
             cluster.runFor(HEARTBEAT_FREQUENCY.get(Settings.EMPTY).millis(), "warnings");
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
 
             clusterNode.heal();
             coordinatorStrategy.disruptElections = true;
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "acquire term failure",
                     Coordinator.class.getCanonicalName(),
                     Level.WARN,
@@ -168,12 +168,12 @@ public void testWarnLoggingOnRegisterFailures() {
                 )
             );
             cluster.runFor(DEFAULT_ELECTION_DELAY, "warnings");
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
 
             coordinatorStrategy.disruptElections = false;
             coordinatorStrategy.disruptPublications = true;
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "verify term failure",
                     Coordinator.CoordinatorPublication.class.getCanonicalName(),
                     Level.WARN,
@@ -181,7 +181,7 @@ public void testWarnLoggingOnRegisterFailures() {
                 )
             );
             cluster.runFor(DEFAULT_ELECTION_DELAY + DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "publication warnings");
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
 
             coordinatorStrategy.disruptPublications = false;
         }
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java
index cc02e9aa805ee..31df801b06b53 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java
@@ -19,7 +19,7 @@
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.transport.MockTransport;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportService;
@@ -650,9 +650,9 @@ public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() {
     }
 
     public void testBootstrapStateLogging() {
-        try (var mockAppender = MockLogAppender.capture(ClusterBootstrapService.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(ClusterBootstrapService.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "fresh node message",
                     ClusterBootstrapService.class.getCanonicalName(),
                     Level.INFO,
@@ -674,12 +674,12 @@ public void testBootstrapStateLogging() {
                 }
             ).logBootstrapState(metadataBuilder.build());
 
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
 
             final String infoMessagePattern = """
                 this node is locked into cluster UUID [test-uuid] and will not attempt further cluster bootstrapping""";
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "bootstrapped node message",
                     ClusterBootstrapService.class.getCanonicalName(),
                     Level.INFO,
@@ -691,7 +691,7 @@ public void testBootstrapStateLogging() {
                 throw new AssertionError("should not be called");
             }).logBootstrapState(Metadata.builder().clusterUUID("test-uuid").clusterUUIDCommitted(true).build());
 
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
 
             final var warningMessagePattern = """
                 this node is locked into cluster UUID [test-uuid] but [cluster.initial_master_nodes] is set to [node1, node2]; \
@@ -699,8 +699,8 @@ public void testBootstrapStateLogging() {
                 for further information see \
                 https://www.elastic.co/guide/en/elasticsearch/reference/*/important-settings.html#initial_master_nodes""";
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "bootstrapped node message if bootstrapping still configured",
                     ClusterBootstrapService.class.getCanonicalName(),
                     Level.WARN,
@@ -718,10 +718,10 @@ public void testBootstrapStateLogging() {
                 }
             ).logBootstrapState(Metadata.builder().clusterUUID("test-uuid").clusterUUIDCommitted(true).build());
 
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
 
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "bootstrapped node message if bootstrapping still configured",
                     ClusterBootstrapService.class.getCanonicalName(),
                     Level.WARN,
@@ -735,10 +735,10 @@ public void testBootstrapStateLogging() {
                 deterministicTaskQueue.advanceTime();
             }
 
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
 
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "bootstrapped node message if discovery type is single node ",
                     ClusterBootstrapService.class.getCanonicalName(),
                     Level.INFO,
@@ -756,7 +756,7 @@ public void testBootstrapStateLogging() {
                 }
             ).logBootstrapState(Metadata.builder().clusterUUID("test-uuid").clusterUUIDCommitted(true).build());
 
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java
index f73057b1cee1d..8974f57cc40bf 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java
@@ -26,7 +26,7 @@
 import org.elasticsearch.monitor.StatusInfo;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.EqualsHashCodeTestUtils;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -109,12 +109,12 @@ public void testScheduling() {
         final long startTimeMillis = deterministicTaskQueue.getCurrentTimeMillis();
         clusterFormationFailureHelper.start();
 
-        try (var mockLogAppender = MockLogAppender.capture(ClusterFormationFailureHelper.class)) {
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation("master not discovered", LOGGER_NAME, Level.WARN, "master not discovered")
+        try (var mockLog = MockLog.capture(ClusterFormationFailureHelper.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation("master not discovered", LOGGER_NAME, Level.WARN, "master not discovered")
             );
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "troubleshooting link",
                     LOGGER_NAME,
                     Level.WARN,
@@ -133,7 +133,7 @@ public void testScheduling() {
             }
             assertThat(warningCount.get(), is(1L));
             assertThat(deterministicTaskQueue.getCurrentTimeMillis() - startTimeMillis, is(expectedDelayMillis));
-            mockLogAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
 
         while (warningCount.get() < 5) {
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
index da7ec73fc6a99..b19cce96a2208 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
@@ -39,7 +39,7 @@
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.monitor.NodeHealthService;
 import org.elasticsearch.monitor.StatusInfo;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xcontent.ToXContent;
@@ -1226,17 +1226,12 @@ public void testNodeCannotJoinIfJoinPingValidationFailsOnMaster() {
         List<ClusterNode> addedNodes = cluster.addNodes(randomIntBetween(1, 2));
         final long previousClusterStateVersion = cluster.getAnyLeader().getLastAppliedClusterState().version();
 
-        try (var mockAppender = MockLogAppender.capture(JoinHelper.class, Coordinator.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
-                    "failed to join",
-                    JoinHelper.class.getCanonicalName(),
-                    Level.INFO,
-                    "*failed to join*"
-                )
+        try (var mockLog = MockLog.capture(JoinHelper.class, Coordinator.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation("failed to join", JoinHelper.class.getCanonicalName(), Level.INFO, "*failed to join*")
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "failed to ping",
                     Coordinator.class.getCanonicalName(),
                     Level.WARN,
@@ -1244,7 +1239,7 @@ public void testNodeCannotJoinIfJoinPingValidationFailsOnMaster() {
                 )
             );
             cluster.runFor(10000, "failing joins");
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
 
         assertTrue(addedNodes.stream().allMatch(ClusterNode::isCandidate));
@@ -1362,12 +1357,12 @@ public void testCannotJoinClusterWithDifferentUUID() {
 
         cluster1.clusterNodes.add(newNode);
 
-        try (var mockAppender = MockLogAppender.capture(JoinHelper.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation("test1", JoinHelper.class.getCanonicalName(), Level.INFO, "*failed to join*")
+        try (var mockLog = MockLog.capture(JoinHelper.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation("test1", JoinHelper.class.getCanonicalName(), Level.INFO, "*failed to join*")
             );
             cluster1.runFor(DEFAULT_STABILISATION_TIME, "failing join validation");
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
 
         assertEquals(0, newNode.getLastAppliedClusterState().version());
@@ -1400,9 +1395,9 @@ public void testReportsConnectBackProblemsDuringJoining() {
             final var leader = cluster.getAnyLeader();
             leader.addActionBlock(TransportService.HANDSHAKE_ACTION_NAME);
 
-            try (var mockAppender = MockLogAppender.capture(Coordinator.class, JoinHelper.class)) {
-                mockAppender.addExpectation(
-                    new MockLogAppender.SeenEventExpectation(
+            try (var mockLog = MockLog.capture(Coordinator.class, JoinHelper.class)) {
+                mockLog.addExpectation(
+                    new MockLog.SeenEventExpectation(
                         "connect-back failure",
                         Coordinator.class.getCanonicalName(),
                         Level.WARN,
@@ -1411,7 +1406,7 @@ public void testReportsConnectBackProblemsDuringJoining() {
                             + "] but could not connect back to the joining node"
                     )
                 );
-                mockAppender.addExpectation(new MockLogAppender.LoggingExpectation() {
+                mockLog.addExpectation(new MockLog.LoggingExpectation() {
                     boolean matched = false;
 
                     @Override
@@ -1468,7 +1463,7 @@ public void assertMatched() {
                     defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) + 8 * DEFAULT_DELAY_VARIABILITY,
                     "allowing time for join attempt"
                 );
-                mockAppender.assertAllExpectationsMatched();
+                mockLog.assertAllExpectationsMatched();
             }
 
             leader.clearActionBlocks();
@@ -1692,8 +1687,8 @@ protected void testLogsWarningPeriodicallyIfClusterNotFormed(String expectedMess
         }
 
         for (int i = scaledRandomIntBetween(1, 10); i >= 0; i--) {
-            try (var mockLogAppender = MockLogAppender.capture(ClusterFormationFailureHelper.class)) {
-                mockLogAppender.addExpectation(new MockLogAppender.LoggingExpectation() {
+            try (var mockLog = MockLog.capture(ClusterFormationFailureHelper.class)) {
+                mockLog.addExpectation(new MockLog.LoggingExpectation() {
                     final Set<DiscoveryNode> nodesLogged = new HashSet<>();
 
                     @Override
@@ -1725,7 +1720,7 @@ public void assertMatched() {
                    }
                 });
                 cluster.runFor(warningDelayMillis + DEFAULT_DELAY_VARIABILITY, "waiting for warning to be emitted");
-                mockLogAppender.assertAllExpectationsMatched();
+                mockLog.assertAllExpectationsMatched();
             }
         }
@@ -1760,8 +1755,8 @@ public void testLogsWarningPeriodicallyIfSingleNodeClusterHasSeedHosts() {
         cluster.stabilise();
 
         for (int i = scaledRandomIntBetween(1, 10); i >= 0; i--) {
-            try (var mockLogAppender = MockLogAppender.capture(Coordinator.class)) {
-                mockLogAppender.addExpectation(new MockLogAppender.LoggingExpectation() {
+            try (var mockLog = MockLog.capture(Coordinator.class)) {
+                mockLog.addExpectation(new MockLog.LoggingExpectation() {
                     String loggedClusterUuid;
 
                     @Override
@@ -1778,7 +1773,7 @@ public void assertMatched() {
                     }
                 });
                 cluster.runFor(warningDelayMillis + DEFAULT_DELAY_VARIABILITY, "waiting for warning to be emitted");
-                mockLogAppender.assertAllExpectationsMatched();
+                mockLog.assertAllExpectationsMatched();
             }
         }
     }
@@ -1803,10 +1798,10 @@ public void testLogsMessagesIfPublicationDelayed() {
         cluster.stabilise();
         final ClusterNode brokenNode = cluster.getAnyNodeExcept(cluster.getAnyLeader());
 
-        try (var mockLogAppender = MockLogAppender.capture(Coordinator.CoordinatorPublication.class, LagDetector.class)) {
+        try (var mockLog = MockLog.capture(Coordinator.CoordinatorPublication.class, LagDetector.class)) {
 
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "publication info message",
                     Coordinator.CoordinatorPublication.class.getCanonicalName(),
                     Level.INFO,
@@ -1818,8 +1813,8 @@ public void testLogsMessagesIfPublicationDelayed() {
                 )
             );
 
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "publication warning",
                     Coordinator.CoordinatorPublication.class.getCanonicalName(),
                     Level.WARN,
@@ -1831,8 +1826,8 @@ public void testLogsMessagesIfPublicationDelayed() {
                 )
             );
 
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "lag warning",
                     LagDetector.class.getCanonicalName(),
                     Level.WARN,
@@ -1843,8 +1838,8 @@ public void testLogsMessagesIfPublicationDelayed() {
                 )
             );
 
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "hot threads from lagging node",
                     LagDetector.class.getCanonicalName(),
                     Level.DEBUG,
@@ -1879,7 +1874,7 @@ public String toString() {
                 "waiting for messages to be emitted"
             );
 
-            mockLogAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorVotingConfigurationTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorVotingConfigurationTests.java
index 8a42f574dfa91..7f665cf241230 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorVotingConfigurationTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorVotingConfigurationTests.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.discovery.DiscoveryModule;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.util.HashSet;
@@ -458,9 +458,9 @@ public void testSettingInitialConfigurationTriggersElection() {
         value = "org.elasticsearch.cluster.coordination.ClusterBootstrapService:INFO"
     )
     public void testClusterUUIDLogging() {
-        try (var mockAppender = MockLogAppender.capture(ClusterBootstrapService.class); var cluster = new Cluster(randomIntBetween(1, 3))) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(ClusterBootstrapService.class); var cluster = new Cluster(randomIntBetween(1, 3))) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "fresh node message",
                     ClusterBootstrapService.class.getCanonicalName(),
                     Level.INFO,
@@ -470,11 +470,11 @@ public void testClusterUUIDLogging() {
             cluster.runRandomly();
             cluster.stabilise();
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
 
             final var restartingNode = cluster.getAnyNode();
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "restarted node message",
                     ClusterBootstrapService.class.getCanonicalName(),
                     Level.INFO,
@@ -486,7 +486,7 @@ public void testClusterUUIDLogging() {
             restartingNode.close();
             cluster.clusterNodes.replaceAll(cn -> cn == restartingNode ? cn.restartedNode() : cn);
             cluster.stabilise();
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ElectionSchedulerFactoryTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ElectionSchedulerFactoryTests.java
index 1c1daa8d35c81..853990dcb5965 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/ElectionSchedulerFactoryTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ElectionSchedulerFactoryTests.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -51,15 +51,15 @@ private void assertElectionSchedule(
         final AtomicBoolean electionStarted = new AtomicBoolean();
 
         try (
-            var appender = MockLogAppender.capture(ElectionSchedulerFactory.class);
+            var mockLog = MockLog.capture(ElectionSchedulerFactory.class);
             var ignored1 = electionSchedulerFactory.startElectionScheduler(
                 initialGracePeriod,
                 () -> assertTrue(electionStarted.compareAndSet(false, true))
             )
         ) {
-            appender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "no zero retries message",
                     ElectionSchedulerFactory.class.getName(),
                     Level.INFO,
@@ -68,8 +68,8 @@ private void assertElectionSchedule(
             );
             for (int i : new int[] { 10, 20, 990 }) {
                 // the test may stop after 1000 attempts, so might not report the 1000th failure; it definitely reports the 990th tho.
-                appender.addExpectation(
-                    new MockLogAppender.SeenEventExpectation(
+                mockLog.addExpectation(
+                    new MockLog.SeenEventExpectation(
                         i + " retries message",
                         ElectionSchedulerFactory.class.getName(),
                         Level.INFO,
@@ -123,7 +123,7 @@ private void assertElectionSchedule(
                 lastElectionFinishTime = thisElectionStartTime + duration;
             }
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
         deterministicTaskQueue.runAllTasks();
         assertFalse(electionStarted.get());
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
index 89ac27699fbb4..664b74c804939 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
@@ -28,7 +28,7 @@
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.telemetry.tracing.Tracer;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.CapturingTransport;
 import org.elasticsearch.test.transport.CapturingTransport.CapturedRequest;
@@ -346,9 +346,9 @@ public void testLatestStoredStateFailure() {
         joinAccumulator.handleJoinRequest(localNode, CompatibilityVersionsUtils.staticCurrent(), Set.of(), joinListener);
         assert joinListener.isDone() == false;
 
-        try (var mockAppender = MockLogAppender.capture(JoinHelper.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(JoinHelper.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "warning log",
                     JoinHelper.class.getCanonicalName(),
                     Level.WARN,
@@ -356,7 +356,7 @@ public void testLatestStoredStateFailure() {
                 )
             );
             joinAccumulator.close(Coordinator.Mode.LEADER);
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
 
         assertEquals("simulated", expectThrows(ElasticsearchException.class, () -> FutureUtils.get(joinListener)).getMessage());
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java
index 544422445fdb7..e51b817bce594 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java
@@ -40,7 +40,7 @@
 import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.test.ClusterServiceUtils;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.index.IndexVersionUtils;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -801,12 +801,12 @@ public void testPerNodeLogging() {
         final ThreadPool threadPool = new TestThreadPool("test");
         try (
-            var appender = MockLogAppender.capture(NodeJoinExecutor.class);
+            var mockLog = MockLog.capture(NodeJoinExecutor.class);
             var clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool)
         ) {
             final var node1 = DiscoveryNodeUtils.create(UUIDs.base64UUID());
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
"info message", LOGGER_NAME, Level.INFO, @@ -826,12 +826,12 @@ public void testPerNodeLogging() { TimeUnit.SECONDS ) ); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); final var node2 = DiscoveryNodeUtils.create(UUIDs.base64UUID()); final var testReasonWithLink = new JoinReason("test", ReferenceDocs.UNSTABLE_CLUSTER_TROUBLESHOOTING); - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "warn message with troubleshooting link", LOGGER_NAME, Level.WARN, @@ -862,7 +862,7 @@ public void testPerNodeLogging() { TimeUnit.SECONDS ) ); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { TestThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java index 8396c651a3b48..41ce520dc9bb6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -116,12 +116,12 @@ public void testPerNodeLogging() { final ThreadPool threadPool = new TestThreadPool("test"); try ( - var appender = MockLogAppender.capture(NodeLeftExecutor.class); + var mockLog = MockLog.capture(NodeLeftExecutor.class); var clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool) ) { final var nodeToRemove = clusterState.nodes().get("other"); - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "info message", LOGGER_NAME, Level.INFO, @@ -135,7 +135,7 @@ public void testPerNodeLogging() { .submitTask("test", new NodeLeftExecutor.Task(nodeToRemove, "test reason", () -> future.onResponse(null)), null) ) ); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { TestThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java index 7b9f0497ca282..0659b65be5844 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -83,9 +83,9 @@ public void testLogSkippedElectionIfRecentLeaderHeartbeat() throws Exception { final var heartbeatFrequency = TimeValue.timeValueSeconds(randomIntBetween(15, 30)); final var maxTimeSinceLastHeartbeat = 
TimeValue.timeValueSeconds(2 * heartbeatFrequency.seconds()); DiscoveryNodeUtils.create("master"); - try (var appender = MockLogAppender.capture(AtomicRegisterPreVoteCollector.class)) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(AtomicRegisterPreVoteCollector.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "log emitted when skipping election", AtomicRegisterPreVoteCollector.class.getCanonicalName(), Level.INFO, @@ -116,7 +116,7 @@ protected long absoluteTimeInMillis() { preVoteCollector.start(ClusterState.EMPTY_STATE, Collections.emptyList()); assertThat(startElection.get(), is(false)); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java index 7227a6b991993..9a783d802a68c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -262,9 +262,9 @@ protected long absoluteTimeInMillis() { fakeClock.set(maxTimeSinceLastHeartbeat.millis() + 1); failReadingHeartbeat.set(true); - try (var mockAppender = MockLogAppender.capture(StoreHeartbeatService.class)) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(StoreHeartbeatService.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "warning log", StoreHeartbeatService.class.getCanonicalName(), Level.WARN, @@ -272,7 +272,7 @@ protected long absoluteTimeInMillis() { ) ); heartbeatService.checkLeaderHeartbeatAndRun(() -> fail("should not be called"), hb -> {}); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java index 13cb36eb88e97..5666a2dd77a89 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -241,7 +241,7 @@ public void testNotifiesOnFailure() throws InterruptedException { @TestLogging(reason = "testing log output", value = "org.elasticsearch.cluster.routing.BatchedRerouteService:DEBUG") public void testExceptionFidelity() { - try (var mockLogAppender = MockLogAppender.capture(BatchedRerouteService.class)) { + try (var mockLog = 
MockLog.capture(BatchedRerouteService.class)) { clusterService.getMasterService() .setClusterStatePublisher( @@ -250,8 +250,8 @@ public void testExceptionFidelity() { // Case 1: an exception thrown from within the reroute itself - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "failure within reroute", BatchedRerouteService.class.getCanonicalName(), Level.ERROR, @@ -269,18 +269,18 @@ public void testExceptionFidelity() { .getMessage(), equalTo("simulated") ); - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); // None of the other cases should yield any log messages by default - mockLogAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation("no errors", BatchedRerouteService.class.getCanonicalName(), Level.ERROR, "*") + mockLog.addExpectation( + new MockLog.UnseenEventExpectation("no errors", BatchedRerouteService.class.getCanonicalName(), Level.ERROR, "*") ); - mockLogAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation("no warnings", BatchedRerouteService.class.getCanonicalName(), Level.WARN, "*") + mockLog.addExpectation( + new MockLog.UnseenEventExpectation("no warnings", BatchedRerouteService.class.getCanonicalName(), Level.WARN, "*") ); - mockLogAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation("no info", BatchedRerouteService.class.getCanonicalName(), Level.INFO, "*") + mockLog.addExpectation( + new MockLog.UnseenEventExpectation("no info", BatchedRerouteService.class.getCanonicalName(), Level.INFO, "*") ); // Case 2: a FailedToCommitClusterStateException (see the call to setClusterStatePublisher above) @@ -290,8 +290,8 @@ public void testExceptionFidelity() { return ClusterState.builder(s).build(); }); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "publish failure", BatchedRerouteService.class.getCanonicalName(), Level.DEBUG, @@ -306,7 +306,7 @@ public void testExceptionFidelity() { FailedToCommitClusterStateException.class, () -> publishFailureFuture.get(10, TimeUnit.SECONDS) ); - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); // Case 3: a NotMasterException @@ -317,8 +317,8 @@ public void testExceptionFidelity() { }, future); }, 10, TimeUnit.SECONDS); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "not-master failure", BatchedRerouteService.class.getCanonicalName(), Level.DEBUG, @@ -329,7 +329,7 @@ public void testExceptionFidelity() { batchedRerouteService.reroute("not-master failure", randomFrom(EnumSet.allOf(Priority.class)), notMasterFuture); expectThrows(ExecutionException.class, NotMasterException.class, () -> notMasterFuture.get(10, TimeUnit.SECONDS)); - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index 1c52ec22323ae..c8c7232acd281 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -23,7 +23,7 @@ import 
org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -97,11 +97,11 @@ public void testLoggingOnNodeLeft() throws IllegalAccessException { assertTrue(initialState.toString(), initialState.getRoutingNodes().unassigned().isEmpty()); - try (var appender = MockLogAppender.capture(AllocationService.class)) { + try (var mockLog = MockLog.capture(AllocationService.class)) { final String dissociationReason = "node left " + randomAlphaOfLength(10); - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "health change log message", AllocationService.class.getName(), Level.INFO, @@ -117,7 +117,7 @@ public void testLoggingOnNodeLeft() throws IllegalAccessException { dissociationReason ); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java index b81ce01f7c119..3a5aab9e80133 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java @@ -38,7 +38,7 @@ import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; @@ -1363,29 +1363,19 @@ protected void updateIndicesReadOnly(Set indicesToMarkReadOnly, Releasab } private void assertNoLogging(DiskThresholdMonitor monitor, Map diskUsages) throws IllegalAccessException { - try (var mockAppender = MockLogAppender.capture(DiskThresholdMonitor.class)) { - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "any INFO message", - DiskThresholdMonitor.class.getCanonicalName(), - Level.INFO, - "*" - ) + try (var mockLog = MockLog.capture(DiskThresholdMonitor.class)) { + mockLog.addExpectation( + new MockLog.UnseenEventExpectation("any INFO message", DiskThresholdMonitor.class.getCanonicalName(), Level.INFO, "*") ); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "any WARN message", - DiskThresholdMonitor.class.getCanonicalName(), - Level.WARN, - "*" - ) + mockLog.addExpectation( + new MockLog.UnseenEventExpectation("any WARN message", DiskThresholdMonitor.class.getCanonicalName(), Level.WARN, "*") ); for (int i = between(1, 3); i >= 0; i--) { monitor.onNewInfo(clusterInfo(diskUsages)); } - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } @@ -1409,12 +1399,12 @@ private void assertSingleInfoMessage(DiskThresholdMonitor monitor, Map diskUsages, Level level, String message) { - try (var mockAppender = MockLogAppender.capture(DiskThresholdMonitor.class)) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation("expected message", 
DiskThresholdMonitor.class.getCanonicalName(), level, message) + try (var mockLog = MockLog.capture(DiskThresholdMonitor.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation("expected message", DiskThresholdMonitor.class.getCanonicalName(), level, message) ); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "any message of another level", DiskThresholdMonitor.class.getCanonicalName(), level == Level.INFO ? Level.WARN : Level.INFO, @@ -1423,7 +1413,7 @@ private void assertLogging(DiskThresholdMonitor monitor, Map ); monitor.onNewInfo(clusterInfo(diskUsages)); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java index cebc4860012ad..c644c0a1d1225 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.AfterClass; @@ -167,10 +167,10 @@ public String toString() { } }; - MockLogAppender.assertThatLogger( + MockLog.assertThatLogger( () -> computation.onNewInput(input1), ContinuousComputation.class, - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "error log", ContinuousComputation.class.getCanonicalName(), Level.ERROR, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index b9cc6c8d8eefa..f114a7eecac8a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -51,7 +51,7 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -75,7 +75,7 @@ import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; -import static org.elasticsearch.test.MockLogAppender.assertThatLogger; +import static org.elasticsearch.test.MockLog.assertThatLogger; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; @@ -1201,7 +1201,7 @@ public void testShouldLogComputationIteration() { checkIterationLogging( 999, 10L, - new MockLogAppender.UnseenEventExpectation( + new MockLog.UnseenEventExpectation( "Should not report long computation too early", 
                DesiredBalanceComputer.class.getCanonicalName(),
                 Level.INFO,
@@ -1212,7 +1212,7 @@ public void testShouldLogComputationIteration() {
         checkIterationLogging(
             1001,
             10L,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "Should report long computation based on iteration count",
                 DesiredBalanceComputer.class.getCanonicalName(),
                 Level.INFO,
@@ -1223,7 +1223,7 @@ public void testShouldLogComputationIteration() {
         checkIterationLogging(
             61,
             1000L,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "Should report long computation based on time",
                 DesiredBalanceComputer.class.getCanonicalName(),
                 Level.INFO,
@@ -1232,7 +1232,7 @@ public void testShouldLogComputationIteration() {
         );
     }
 
-    private void checkIterationLogging(int iterations, long eachIterationDuration, MockLogAppender.AbstractEventExpectation expectation) {
+    private void checkIterationLogging(int iterations, long eachIterationDuration, MockLog.AbstractEventExpectation expectation) {
 
         var mockThreadPool = mock(ThreadPool.class);
         var currentTime = new AtomicLong(0L);
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java
index f50418bf20e6c..899ec425704d4 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java
@@ -65,7 +65,7 @@
 import org.elasticsearch.snapshots.SnapshotsInfoService;
 import org.elasticsearch.telemetry.metric.MeterRegistry;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.junit.BeforeClass;
 
@@ -95,7 +95,7 @@
 import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING;
 import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING;
 import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings;
-import static org.elasticsearch.test.MockLogAppender.assertThatLogger;
+import static org.elasticsearch.test.MockLog.assertThatLogger;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -1287,7 +1287,7 @@ public void testShouldLogOnTooManyUndesiredAllocations() {
         assertThatLogger(
             () -> reconciler.reconcile(new DesiredBalance(1, dataNode1Assignments), createRoutingAllocationFrom(clusterState)),
             DesiredBalanceReconciler.class,
-            new MockLogAppender.UnseenEventExpectation(
+            new MockLog.UnseenEventExpectation(
                 "Should not log if all shards on desired location",
                 DesiredBalanceReconciler.class.getCanonicalName(),
                 Level.WARN,
@@ -1297,7 +1297,7 @@ public void testShouldLogOnTooManyUndesiredAllocations() {
         assertThatLogger(
             () -> reconciler.reconcile(new DesiredBalance(1, dataNode2Assignments), createRoutingAllocationFrom(clusterState)),
             DesiredBalanceReconciler.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "Should log first too many shards on undesired locations",
                 DesiredBalanceReconciler.class.getCanonicalName(),
Level.WARN, @@ -1307,7 +1307,7 @@ public void testShouldLogOnTooManyUndesiredAllocations() { assertThatLogger( () -> reconciler.reconcile(new DesiredBalance(1, dataNode2Assignments), createRoutingAllocationFrom(clusterState)), DesiredBalanceReconciler.class, - new MockLogAppender.UnseenEventExpectation( + new MockLog.UnseenEventExpectation( "Should not log immediate second too many shards on undesired locations", DesiredBalanceReconciler.class.getCanonicalName(), Level.WARN, diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierRecordingServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierRecordingServiceTests.java index be7ca6d2f0616..f6e90324d7464 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierRecordingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierRecordingServiceTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.junit.Before; @@ -146,10 +146,10 @@ public void testSlowTaskDebugLogging() { deterministicTaskQueue.getCurrentTimeMillis() + debugLoggingTimeout.millis() + between(1, 1000), slowAction::close ); - MockLogAppender.assertThatLogger( + MockLog.assertThatLogger( deterministicTaskQueue::runAllTasksInTimeOrder, ClusterApplierRecordingService.class, - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "hot threads", ClusterApplierRecordingService.class.getCanonicalName(), Level.DEBUG, @@ -163,15 +163,10 @@ public void testSlowTaskDebugLogging() { randomLongBetween(0, deterministicTaskQueue.getCurrentTimeMillis() + debugLoggingTimeout.millis() - 1), fastAction::close ); - MockLogAppender.assertThatLogger( + MockLog.assertThatLogger( deterministicTaskQueue::runAllTasksInTimeOrder, ClusterApplierRecordingService.class, - new MockLogAppender.UnseenEventExpectation( - "hot threads", - ClusterApplierRecordingService.class.getCanonicalName(), - Level.DEBUG, - "*" - ) + new MockLog.UnseenEventExpectation("hot threads", ClusterApplierRecordingService.class.getCanonicalName(), Level.DEBUG, "*") ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 81a167c351dd6..f8d5b727399ab 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -117,25 +117,25 @@ private void advanceTime(long millis) { @TestLogging(value = "org.elasticsearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level") public void testClusterStateUpdateLogging() throws Exception { - try (var mockAppender = 
MockLogAppender.capture(ClusterApplierService.class)) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(ClusterApplierService.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "test1", ClusterApplierService.class.getCanonicalName(), Level.DEBUG, "*processing [test1]: took [1s] no change in cluster state" ) ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "test2", ClusterApplierService.class.getCanonicalName(), Level.TRACE, "*failed to execute cluster state applier in [2s]*" ) ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "test3", ClusterApplierService.class.getCanonicalName(), Level.DEBUG, @@ -180,23 +180,23 @@ public void onFailure(Exception e) { fail(); } }); - assertBusy(mockAppender::assertAllExpectationsMatched); + assertBusy(mockLog::assertAllExpectationsMatched); } } @TestLogging(value = "org.elasticsearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level") public void testLongClusterStateUpdateLogging() throws Exception { - try (var mockAppender = MockLogAppender.capture(ClusterApplierService.class)) { - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + try (var mockLog = MockLog.capture(ClusterApplierService.class)) { + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "test1 shouldn't see because setting is too low", ClusterApplierService.class.getCanonicalName(), Level.WARN, "*cluster state applier task [test1] took [*] which is above the warn threshold of *" ) ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "test2", ClusterApplierService.class.getCanonicalName(), Level.WARN, @@ -204,8 +204,8 @@ public void testLongClusterStateUpdateLogging() throws Exception { + "[running task [test2]] took [*" ) ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "test4", ClusterApplierService.class.getCanonicalName(), Level.WARN, @@ -280,7 +280,7 @@ public void onFailure(Exception e) { }); latch.await(); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index bb5c7848d476c..bebfce3d14899 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -52,7 +52,7 @@ import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.ReachabilityChecker; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.tasks.MockTaskManager; @@ -374,25 +374,25 @@ public void clusterStatePublished(ClusterState newClusterState) { @TestLogging(value = "org.elasticsearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level") public void testClusterStateUpdateLogging() throws Exception { - try (var 
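Editor's note: the hunks above show the migration's core pattern; the renamed API keeps the AutoCloseable capture idiom, so expectations live inside a try-with-resources block. A hedged sketch with a hypothetical MyService (all MockLog calls appear verbatim in the hunks above):

    try (var mockLog = MockLog.capture(MyService.class)) {
        mockLog.addExpectation(
            new MockLog.SeenEventExpectation("seen", MyService.class.getCanonicalName(), Level.DEBUG, "*processing [job]*")
        );
        mockLog.addExpectation(
            new MockLog.UnseenEventExpectation("quiet", MyService.class.getCanonicalName(), Level.WARN, "*")
        );
        myService.runJob();                      // hypothetical trigger for the logging
        mockLog.assertAllExpectationsMatched();
    }                                            // capture is released when the block exits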
diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java
index bb5c7848d476c..bebfce3d14899 100644
--- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java
@@ -52,7 +52,7 @@
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.test.ClusterServiceUtils;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.ReachabilityChecker;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.tasks.MockTaskManager;
@@ -374,25 +374,25 @@ public void clusterStatePublished(ClusterState newClusterState) {
     @TestLogging(value = "org.elasticsearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level")
     public void testClusterStateUpdateLogging() throws Exception {
-        try (var mockAppender = MockLogAppender.capture(MasterService.class); var masterService = createMasterService(true)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(MasterService.class); var masterService = createMasterService(true)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test1 start",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
                     "executing cluster state update for [test1]"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test1 computation",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
                     "took [1s] to compute cluster state update for [test1]"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test1 notification",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
@@ -400,32 +400,32 @@ public void testClusterStateUpdateLogging() throws Exception {
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test2 start",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
                     "executing cluster state update for [test2]"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test2 failure",
                     MasterService.class.getCanonicalName(),
                     Level.TRACE,
                     "failed to execute cluster state update (on version: [*], uuid: [*]) for [test2]*"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test2 computation",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
                     "took [2s] to compute cluster state update for [test2]"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test2 notification",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
@@ -433,24 +433,24 @@ public void testClusterStateUpdateLogging() throws Exception {
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test3 start",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
                     "executing cluster state update for [test3]"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test3 computation",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
                     "took [3s] to compute cluster state update for [test3]"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test3 notification",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
@@ -458,8 +458,8 @@ public void testClusterStateUpdateLogging() throws Exception {
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test4",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
@@ -522,7 +522,7 @@ public void onFailure(Exception e) {
                 fail();
             }
         });
-            assertBusy(mockAppender::assertAllExpectationsMatched);
+            assertBusy(mockLog::assertAllExpectationsMatched);
         }
     }
@@ -1109,7 +1109,7 @@ public void testLongClusterStateUpdateLogging() throws Exception {
             .put(Node.NODE_NAME_SETTING.getKey(), "test_node")
             .build();
         try (
-            var mockAppender = MockLogAppender.capture(MasterService.class);
+            var mockLog = MockLog.capture(MasterService.class);
             MasterService masterService = new MasterService(
                 settings,
                 new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
@@ -1123,48 +1123,48 @@ protected boolean publicationMayFail() {
                 }
             }
         ) {
-            mockAppender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "test1 shouldn't log because it was fast enough",
                     MasterService.class.getCanonicalName(),
                     Level.WARN,
                     "*took*test1*"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test2",
                     MasterService.class.getCanonicalName(),
                     Level.WARN,
                     "*took [*] to compute cluster state update for [test2], which exceeds the warn threshold of [10s]"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test3",
                     MasterService.class.getCanonicalName(),
                     Level.WARN,
                     "*took [*] to compute cluster state update for [test3], which exceeds the warn threshold of [10s]"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test4",
                     MasterService.class.getCanonicalName(),
                     Level.WARN,
                     "*took [*] to compute cluster state update for [test4], which exceeds the warn threshold of [10s]"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "test5 should not log despite publishing slowly",
                     MasterService.class.getCanonicalName(),
                     Level.WARN,
                     "*took*test5*"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "test6 should log due to slow and failing publication",
                     MasterService.class.getCanonicalName(),
                     Level.WARN,
@@ -1325,7 +1325,7 @@ public void onFailure(Exception e) {
                 }
             });
             latch.await();
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
@@ -1717,7 +1717,7 @@ public void testStarvationLogging() throws Exception {
         final long startTimeMillis = relativeTimeInMillis;
         final long taskDurationMillis = TimeValue.timeValueSeconds(1).millis();
-        try (MasterService masterService = createMasterService(true); var mockAppender = MockLogAppender.capture(MasterService.class)) {
+        try (MasterService masterService = createMasterService(true); var mockLog = MockLog.capture(MasterService.class)) {
             final AtomicBoolean keepRunning = new AtomicBoolean(true);
             final CyclicBarrier cyclicBarrier = new CyclicBarrier(2);
             final Runnable awaitNextTask = () -> {
@@ -1760,18 +1760,18 @@ public void onFailure(Exception e) {
             });
             // check that a warning is logged after 5m
-            final MockLogAppender.EventuallySeenEventExpectation expectation1 = new MockLogAppender.EventuallySeenEventExpectation(
+            final MockLog.EventuallySeenEventExpectation expectation1 = new MockLog.EventuallySeenEventExpectation(
                 "starvation warning",
                 MasterService.class.getCanonicalName(),
                 Level.WARN,
                 "pending task queue has been nonempty for [5m/300000ms] which is longer than the warn threshold of [300000ms];"
                     + " there are currently [2] pending tasks, the oldest of which has age [*"
             );
-            mockAppender.addExpectation(expectation1);
+            mockLog.addExpectation(expectation1);
             while (relativeTimeInMillis - startTimeMillis < warnThresholdMillis) {
                 awaitNextTask.run();
-                mockAppender.assertAllExpectationsMatched();
+                mockLog.assertAllExpectationsMatched();
             }
             expectation1.setExpectSeen();
@@ -1779,21 +1779,21 @@ public void onFailure(Exception e) {
             // the master service thread is somewhere between completing the previous task and starting the next one, which is when the
             // logging happens, so we must wait for another task to run too to ensure that the message was logged
             awaitNextTask.run();
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
             // check that another warning is logged after 10m
-            final MockLogAppender.EventuallySeenEventExpectation expectation2 = new MockLogAppender.EventuallySeenEventExpectation(
+            final MockLog.EventuallySeenEventExpectation expectation2 = new MockLog.EventuallySeenEventExpectation(
                 "starvation warning",
                 MasterService.class.getCanonicalName(),
                 Level.WARN,
                 "pending task queue has been nonempty for [10m/600000ms] which is longer than the warn threshold of [300000ms];"
                     + " there are currently [2] pending tasks, the oldest of which has age [*"
             );
-            mockAppender.addExpectation(expectation2);
+            mockLog.addExpectation(expectation2);
             while (relativeTimeInMillis - startTimeMillis < warnThresholdMillis * 2) {
                 awaitNextTask.run();
-                mockAppender.assertAllExpectationsMatched();
+                mockLog.assertAllExpectationsMatched();
             }
             expectation2.setExpectSeen();
@@ -1801,7 +1801,7 @@ public void onFailure(Exception e) {
             // the master service thread is somewhere between completing the previous task and starting the next one, which is when the
             // logging happens, so we must wait for another task to run too to ensure that the message was logged
             awaitNextTask.run();
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
             // now stop the starvation and clean up
             keepRunning.set(false);
@@ -1815,7 +1815,7 @@
         reason = "to ensure that we log the right batch description, which only happens at DEBUG level"
     )
     public void testBatchedUpdateSummaryLogging() throws Exception {
-        try (var mockAppender = MockLogAppender.capture(MasterService.class); var masterService = createMasterService(true)) {
+        try (var mockLog = MockLog.capture(MasterService.class); var masterService = createMasterService(true)) {
             final var barrier = new CyclicBarrier(2);
             final var blockingTask = new ClusterStateUpdateTask() {
@@ -1872,8 +1872,8 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) {
                 for (int task = 0; task < 2; task++) {
                     smallBatchQueue.submitTask("source-" + source, new Task("task-" + source + "-" + task), null);
                 }
-                mockAppender.addExpectation(
-                    new MockLogAppender.SeenEventExpectation(
+                mockLog.addExpectation(
+                    new MockLog.SeenEventExpectation(
                         "mention of tasks source-" + source,
                         MasterService.class.getCanonicalName(),
                         Level.DEBUG,
@@ -1889,8 +1889,8 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) {
                     manySourceQueue.submitTask("source-" + source, new Task("task-" + task), null);
                 }
             }
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "truncated description of batch with many sources",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
@@ -1912,8 +1912,8 @@ public boolean innerMatch(LogEvent event) {
             for (int task = 0; task < 2048; task++) {
                 manyTasksPerSourceQueue.submitTask("unique-source", new Task("task-" + task), null);
             }
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "truncated description of batch with many tasks from a single source",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
@@ -1930,7 +1930,7 @@ public boolean innerMatch(LogEvent event) {
             assertTrue(smallBatchExecutor.semaphore.tryAcquire(4, 10, TimeUnit.SECONDS));
             assertTrue(manySourceExecutor.semaphore.tryAcquire(2048, 10, TimeUnit.SECONDS));
             assertTrue(manyTasksPerSourceExecutor.semaphore.tryAcquire(2048, 10, TimeUnit.SECONDS));
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
@@ -2249,14 +2249,12 @@ public void execute(Runnable command) {
         };
         try (
-            var appender = MockLogAppender.capture(MasterService.class);
+            var mockLog = MockLog.capture(MasterService.class);
             var masterService = createMasterService(true, null, threadPool, threadPoolExecutor)
         ) {
-            appender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation("warning", MasterService.class.getCanonicalName(), Level.WARN, "*")
-            );
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(new MockLog.UnseenEventExpectation("warning", MasterService.class.getCanonicalName(), Level.WARN, "*"));
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "debug",
                     MasterService.class.getCanonicalName(),
                     Level.DEBUG,
@@ -2293,7 +2291,7 @@ public void onFailure(Exception e) {
             assertFalse(deterministicTaskQueue.hasRunnableTasks());
             assertFalse(deterministicTaskQueue.hasDeferredTasks());
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
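Editor's note: testStarvationLogging above relies on MockLog.EventuallySeenEventExpectation, which this diff shows being armed with setExpectSeen(). A hedged reading of that pattern (MyService and the trailing steps are hypothetical; the calls themselves appear in the hunks above):

    // Registered early: until armed, assertAllExpectationsMatched() tolerates the event's absence.
    var starvation = new MockLog.EventuallySeenEventExpectation(
        "starvation warning",
        MyService.class.getCanonicalName(),
        Level.WARN,
        "pending task queue has been nonempty for [*"
    );
    mockLog.addExpectation(starvation);
    // ... drive the code; periodically call mockLog.assertAllExpectationsMatched() ...
    starvation.setExpectSeen();              // from now on the warning must actually be logged
    // ... drive one more step, then:
    mockLog.assertAllExpectationsMatched();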
diff --git a/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java b/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java
index a192fb344dabb..33349a4d6b3e4 100644
--- a/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java
+++ b/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java
@@ -13,9 +13,9 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.core.LogEvent;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
-import org.elasticsearch.test.MockLogAppender.LoggingExpectation;
-import org.elasticsearch.test.MockLogAppender.SeenEventExpectation;
+import org.elasticsearch.test.MockLog;
+import org.elasticsearch.test.MockLog.LoggingExpectation;
+import org.elasticsearch.test.MockLog.SeenEventExpectation;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -61,15 +61,14 @@ public static void restoreLoggerState() {
     private void assertLogged(Runnable loggingCode, LoggingExpectation... expectations) {
         Logger testLogger = LogManager.getLogger("");
         Level savedLevel = testLogger.getLevel();
-        MockLogAppender mockAppender = new MockLogAppender();
-        try (var ignored = mockAppender.capturing("")) {
+        try (var mockLog = MockLog.capture("")) {
             Loggers.setLevel(testLogger, Level.ALL);
             for (var expectation : expectations) {
-                mockAppender.addExpectation(expectation);
+                mockLog.addExpectation(expectation);
             }
             loggingCode.run();
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         } finally {
             Loggers.setLevel(testLogger, savedLevel);
         }
diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java
index 57e93514689e6..8c6b2692e1ca1 100644
--- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java
+++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java
@@ -20,7 +20,7 @@
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import java.util.Arrays;
@@ -1433,9 +1433,9 @@ public void testLogSettingUpdate() throws Exception {
         );
         final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY);
-        try (var mockLogAppender = MockLogAppender.capture(IndexScopedSettings.class)) {
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(IndexScopedSettings.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "message",
                     "org.elasticsearch.common.settings.IndexScopedSettings",
                     Level.INFO,
@@ -1451,7 +1451,7 @@ public boolean innerMatch(LogEvent event) {
                 newIndexMeta("index1", Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s").build())
             );
-            mockLogAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java
index 8e62a9306a3d4..a42e09a1c080b 100644
--- a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java
+++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.rest.FakeRestRequest;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.json.JsonXContent;
@@ -93,9 +93,9 @@ public void testFilteredSettingIsNotLogged() throws Exception {
         Setting filteredSetting = Setting.simpleString("key", Property.Filtered);
         assertExpectedLogMessages(
             (testLogger) -> Setting.logSettingUpdate(filteredSetting, newSettings, oldSettings, testLogger),
-            new MockLogAppender.SeenEventExpectation("secure logging", "org.elasticsearch.test", Level.INFO, "updating [key]"),
-            new MockLogAppender.UnseenEventExpectation("unwanted old setting name", "org.elasticsearch.test", Level.INFO, "*old*"),
-            new MockLogAppender.UnseenEventExpectation("unwanted new setting name", "org.elasticsearch.test", Level.INFO, "*new*")
+            new MockLog.SeenEventExpectation("secure logging", "org.elasticsearch.test", Level.INFO, "updating [key]"),
+            new MockLog.UnseenEventExpectation("unwanted old setting name", "org.elasticsearch.test", Level.INFO, "*old*"),
+            new MockLog.UnseenEventExpectation("unwanted new setting name", "org.elasticsearch.test", Level.INFO, "*new*")
         );
     }
@@ -106,22 +106,16 @@ public void testRegularSettingUpdateIsFullyLogged() throws Exception {
         Setting regularSetting = Setting.simpleString("key");
         assertExpectedLogMessages(
             (testLogger) -> Setting.logSettingUpdate(regularSetting, newSettings, oldSettings, testLogger),
-            new MockLogAppender.SeenEventExpectation(
-                "regular logging",
-                "org.elasticsearch.test",
-                Level.INFO,
-                "updating [key] from [old] to [new]"
-            )
+            new MockLog.SeenEventExpectation("regular logging", "org.elasticsearch.test", Level.INFO, "updating [key] from [old] to [new]")
         );
     }
-    private void assertExpectedLogMessages(Consumer consumer, MockLogAppender.LoggingExpectation... expectations) {
+    private void assertExpectedLogMessages(Consumer consumer, MockLog.LoggingExpectation... expectations) {
         Logger testLogger = LogManager.getLogger("org.elasticsearch.test");
-        MockLogAppender appender = new MockLogAppender();
-        try (var ignored = appender.capturing("org.elasticsearch.test")) {
-            Arrays.stream(expectations).forEach(appender::addExpectation);
+        try (var mockLog = MockLog.capture("org.elasticsearch.test")) {
+            Arrays.stream(expectations).forEach(mockLog::addExpectation);
             consumer.accept(testLogger);
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/discovery/HandshakingTransportAddressConnectorTests.java b/server/src/test/java/org/elasticsearch/discovery/HandshakingTransportAddressConnectorTests.java
index 115425ff63a91..8ca96aff9c3e5 100644
--- a/server/src/test/java/org/elasticsearch/discovery/HandshakingTransportAddressConnectorTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/HandshakingTransportAddressConnectorTests.java
@@ -23,7 +23,7 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.MockTransport;
 import org.elasticsearch.threadpool.TestThreadPool;
@@ -153,9 +153,9 @@ public void testLogsFullConnectionFailureAfterSuccessfulHandshake() throws Excep
         FailureListener failureListener = new FailureListener();
-        try (var mockAppender = MockLogAppender.capture(HandshakingTransportAddressConnector.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(HandshakingTransportAddressConnector.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "message",
                     HandshakingTransportAddressConnector.class.getCanonicalName(),
                     Level.WARN,
@@ -171,7 +171,7 @@ public void testLogsFullConnectionFailureAfterSuccessfulHandshake() throws Excep
             handshakingTransportAddressConnector.connectToRemoteMasterNode(discoveryAddress, failureListener);
             assertThat(failureListener.getFailureMessage(), containsString("simulated"));
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
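Editor's note: capture() is also used with logger names rather than classes ("" in JULBridgeTests and "org.elasticsearch.test" in SettingsFilterTests above), which suits helpers that accept expectations as varargs. A hedged sketch of a helper in that style (the helper name is hypothetical; assumes java.util.Arrays is imported):

    private void assertLogs(Runnable code, MockLog.LoggingExpectation... expectations) {
        try (var mockLog = MockLog.capture("org.elasticsearch.test")) {   // name-based capture
            Arrays.stream(expectations).forEach(mockLog::addExpectation);
            code.run();
            mockLog.assertAllExpectationsMatched();
        }
    }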
diff --git a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java
index dcd57a34db315..598351a32dc48 100644
--- a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java
@@ -24,7 +24,7 @@
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.telemetry.tracing.Tracer;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.CapturingTransport;
 import org.elasticsearch.test.transport.CapturingTransport.CapturedRequest;
@@ -808,9 +808,9 @@ public void testLogsWarningsIfActiveForLongEnough() throws IllegalAccessExceptio
         final long endTime = deterministicTaskQueue.getCurrentTimeMillis() + VERBOSITY_INCREASE_TIMEOUT_SETTING.get(Settings.EMPTY)
             .millis();
-        try (var appender = MockLogAppender.capture(PeerFinder.class)) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(PeerFinder.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "discovery result",
                     "org.elasticsearch.discovery.PeerFinder",
                     Level.WARN,
@@ -826,7 +826,7 @@ public boolean innerMatch(LogEvent event) {
                 deterministicTaskQueue.advanceTime();
                 runAllRunnableTasks();
             }
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
@@ -841,9 +841,9 @@ public void testLogsStackTraceInDiscoveryResultMessages() throws IllegalAccessEx
         final long endTime = deterministicTaskQueue.getCurrentTimeMillis() + VERBOSITY_INCREASE_TIMEOUT_SETTING.get(Settings.EMPTY)
             .millis();
-        try (var appender = MockLogAppender.capture(PeerFinder.class)) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(PeerFinder.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "discovery result",
                     "org.elasticsearch.discovery.PeerFinder",
                     Level.DEBUG,
@@ -858,10 +858,10 @@ public boolean innerMatch(LogEvent event) {
             deterministicTaskQueue.advanceTime();
             runAllRunnableTasks();
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "discovery result",
                     "org.elasticsearch.discovery.PeerFinder",
                     Level.WARN,
@@ -877,7 +877,7 @@ public boolean innerMatch(LogEvent event) {
                 deterministicTaskQueue.advanceTime();
                 runAllRunnableTasks();
             }
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
@@ -897,7 +897,7 @@ public void testEventuallyLogsIfReturnedMasterIsUnreachable() {
         final DiscoveryNode unreachableMaster = newDiscoveryNode("unreachable-master");
         transportAddressConnector.unreachableAddresses.add(unreachableMaster.getAddress());
-        MockLogAppender.assertThatLogger(() -> {
+        MockLog.assertThatLogger(() -> {
             while (deterministicTaskQueue.getCurrentTimeMillis() <= endTime) {
                 deterministicTaskQueue.advanceTime();
                 runAllRunnableTasks();
@@ -908,7 +908,7 @@ public void testEventuallyLogsIfReturnedMasterIsUnreachable() {
             }
         },
             PeerFinder.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "discovery result",
                 "org.elasticsearch.discovery.PeerFinder",
                 Level.WARN,
diff --git a/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java b/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java
index f166c9777f3af..9fb017a17a325 100644
--- a/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java
@@ -22,7 +22,7 @@
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.Transport;
@@ -215,9 +215,9 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost
         closeables.push(transportService);
         recreateSeedHostsResolver(transportService);
-        try (var appender = MockLogAppender.capture(SeedHostsResolver.class)) {
-            appender.addExpectation(
-                new MockLogAppender.ExceptionSeenEventExpectation(
+        try (var mockLog = MockLog.capture(SeedHostsResolver.class)) {
+            mockLog.addExpectation(
+                new MockLog.ExceptionSeenEventExpectation(
                     getTestName(),
                     SeedHostsResolver.class.getCanonicalName(),
                     Level.WARN,
@@ -228,7 +228,7 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost
                 )
             );
             assertThat(seedHostsResolver.resolveHosts(Collections.singletonList(hostname)), empty());
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
@@ -285,9 +285,9 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost
         closeables.push(transportService);
         recreateSeedHostsResolver(transportService);
-        try (var appender = MockLogAppender.capture(SeedHostsResolver.class)) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(SeedHostsResolver.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     getTestName(),
                     SeedHostsResolver.class.getCanonicalName(),
                     Level.WARN,
@@ -297,7 +297,7 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost
                 )
            );
            assertThat(seedHostsResolver.resolveHosts(Arrays.asList("hostname1", "hostname2")), hasSize(1));
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         } finally {
             latch.countDown();
         }
@@ -402,9 +402,9 @@ public BoundTransportAddress boundAddress() {
         closeables.push(transportService);
         recreateSeedHostsResolver(transportService);
-        try (var appender = MockLogAppender.capture(SeedHostsResolver.class)) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(SeedHostsResolver.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     getTestName(),
                     SeedHostsResolver.class.getCanonicalName(),
                     Level.WARN,
@@ -417,7 +417,7 @@ public BoundTransportAddress boundAddress() {
             assertThat(transportAddresses, hasSize(1)); // only one of the two is valid and will be used
             assertThat(transportAddresses.get(0).getAddress(), equalTo("127.0.0.1"));
             assertThat(transportAddresses.get(0).getPort(), equalTo(9301));
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
index 7fbf5d18bf72a..247f60b7228e3 100644
--- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
+++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
@@ -38,7 +38,7 @@
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.IndexSettingsModule;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.NodeRoles;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.hamcrest.Matchers;
@@ -132,17 +132,17 @@ public void testShardLock() throws Exception {
         Index index = new Index("foo", "fooUUID");
-        try (var appender = MockLogAppender.capture(NodeEnvironment.class); var lock = env.shardLock(new ShardId(index, 0), "1")) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(NodeEnvironment.class); var lock = env.shardLock(new ShardId(index, 0), "1")) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "hot threads logging",
                     NODE_ENVIRONMENT_LOGGER_NAME,
                     Level.DEBUG,
                     "hot threads while failing to obtain shard lock for [foo][0]: obtaining shard lock for [2] timed out after *"
                 )
             );
-            appender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "second attempt should be suppressed due to throttling",
                     NODE_ENVIRONMENT_LOGGER_NAME,
                     Level.DEBUG,
@@ -163,7 +163,7 @@ public void testShardLock() throws Exception {
                 () -> env.lockAllForIndex(index, idxSettings, "3", randomIntBetween(0, 10))
             );
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
         // can lock again?
diff --git a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java
index 1db9dbd2b5709..d2ad92320cada 100644
--- a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java
@@ -63,7 +63,7 @@
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.test.CorruptionUtils;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import java.io.IOError;
@@ -1186,7 +1186,7 @@ public void testSlowLogging() throws IOException, IllegalAccessException {
             null,
             clusterState,
             writer,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "should see warning at threshold",
                 PersistedClusterStateService.class.getCanonicalName(),
                 Level.WARN,
@@ -1202,7 +1202,7 @@ public void testSlowLogging() throws IOException, IllegalAccessException {
             null,
             clusterState,
             writer,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "should see warning above threshold",
                 PersistedClusterStateService.class.getCanonicalName(),
                 Level.WARN,
@@ -1218,7 +1218,7 @@ public void testSlowLogging() throws IOException, IllegalAccessException {
             null,
             clusterState,
             writer,
-            new MockLogAppender.UnseenEventExpectation(
+            new MockLog.UnseenEventExpectation(
                 "should not see warning below threshold",
                 PersistedClusterStateService.class.getCanonicalName(),
                 Level.WARN,
@@ -1236,7 +1236,7 @@ public void testSlowLogging() throws IOException, IllegalAccessException {
             null,
             clusterState,
             writer,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "should see warning at reduced threshold",
                 PersistedClusterStateService.class.getCanonicalName(),
                 Level.WARN,
@@ -1270,7 +1270,7 @@ public void testSlowLogging() throws IOException, IllegalAccessException {
             clusterState,
             newClusterState,
             writer,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "should see warning at threshold",
                 PersistedClusterStateService.class.getCanonicalName(),
                 Level.WARN,
@@ -1289,7 +1289,7 @@ public void testSlowLogging() throws IOException, IllegalAccessException {
             null,
             clusterState,
             writer,
-            new MockLogAppender.UnseenEventExpectation(
+            new MockLog.UnseenEventExpectation(
                 "should not see warning below threshold",
                 PersistedClusterStateService.class.getCanonicalName(),
                 Level.WARN,
@@ -1302,7 +1302,7 @@ public void testSlowLogging() throws IOException, IllegalAccessException {
             clusterState,
             newClusterState,
             writer,
-            new MockLogAppender.UnseenEventExpectation(
+            new MockLog.UnseenEventExpectation(
                 "should not see warning below threshold",
                 PersistedClusterStateService.class.getCanonicalName(),
                 Level.WARN,
@@ -1540,33 +1540,33 @@ public void testDebugLogging() throws IOException, IllegalAccessException {
             writer.writeFullStateAndCommit(randomNonNegativeLong(), ClusterState.EMPTY_STATE);
         }
-        try (var mockAppender = MockLogAppender.capture(PersistedClusterStateService.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(PersistedClusterStateService.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "should see checkindex message",
                     PersistedClusterStateService.class.getCanonicalName(),
                     Level.DEBUG,
                     "checking cluster state integrity"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "should see commit message including timestamps",
                     PersistedClusterStateService.class.getCanonicalName(),
                     Level.DEBUG,
                     "loading cluster state from commit [*] in [*creationTime*"
                 )
            );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "should see user data",
                     PersistedClusterStateService.class.getCanonicalName(),
                     Level.DEBUG,
                     "cluster state commit user data: *" + PersistedClusterStateService.NODE_VERSION_KEY + "*"
                 )
            );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "should see segment message including timestamp",
                     PersistedClusterStateService.class.getCanonicalName(),
                     Level.DEBUG,
@@ -1575,7 +1575,7 @@ public void testDebugLogging() throws IOException, IllegalAccessException {
             );
             persistedClusterStateService.loadBestOnDiskState();
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
 }
@@ -1879,17 +1879,17 @@ private void assertExpectedLogs(
         ClusterState previousState,
         ClusterState clusterState,
         PersistedClusterStateService.Writer writer,
-        MockLogAppender.LoggingExpectation expectation
+        MockLog.LoggingExpectation expectation
     ) throws IOException {
-        try (var mockAppender = MockLogAppender.capture(PersistedClusterStateService.class)) {
-            mockAppender.addExpectation(expectation);
+        try (var mockLog = MockLog.capture(PersistedClusterStateService.class)) {
+            mockLog.addExpectation(expectation);
             if (previousState == null) {
                 writer.writeFullStateAndCommit(currentTerm, clusterState);
             } else {
                 writer.writeIncrementalStateAndCommit(currentTerm, previousState, clusterState);
             }
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
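Editor's note: several hunk headers above (SettingTests, PeerFinderTests, MasterServiceTests) show anonymous expectation subclasses overriding innerMatch(LogEvent). A hedged sketch of that refinement (the length check is hypothetical; assumes org.apache.logging.log4j.core.LogEvent is imported, as in JULBridgeTests above):

    mockLog.addExpectation(
        new MockLog.SeenEventExpectation("bounded message", MyService.class.getCanonicalName(), Level.DEBUG, "*") {
            @Override
            public boolean innerMatch(LogEvent event) {
                // extra predicate on the matched event, beyond the message glob
                return event.getMessage().getFormattedMessage().length() <= 1024;
            }
        }
    );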
diff --git a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java
index 9d5f0845521a2..9c66f1d36b4c0 100644
--- a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java
+++ b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java
@@ -31,7 +31,7 @@
 import org.elasticsearch.telemetry.metric.LongGaugeMetric;
 import org.elasticsearch.telemetry.metric.MeterRegistry;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.junit.After;
@@ -613,33 +613,33 @@ public void testClosingWhenRunInProgress() throws Exception {
     }
     public void testLoggingHappens() {
-        try (var mockAppender = MockLogAppender.capture(HealthPeriodicLogger.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(HealthPeriodicLogger.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "overall",
                     HealthPeriodicLogger.class.getCanonicalName(),
                     Level.INFO,
                     String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("overall"))
                 )
            );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "master_is_stable",
                     HealthPeriodicLogger.class.getCanonicalName(),
                     Level.INFO,
                     String.format(Locale.ROOT, "%s=\"green\"", makeHealthStatusString("master_is_stable"))
                 )
            );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "disk",
                     HealthPeriodicLogger.class.getCanonicalName(),
                     Level.INFO,
                     String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("disk"))
                 )
            );
-            mockAppender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "ilm",
                     HealthPeriodicLogger.class.getCanonicalName(),
                     Level.INFO,
@@ -668,30 +668,30 @@ public void testLoggingHappens() {
             SchedulerEngine.Event event = new SchedulerEngine.Event(HealthPeriodicLogger.HEALTH_PERIODIC_LOGGER_JOB_NAME, 0, 0);
             testHealthPeriodicLogger.triggered(event);
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
     public void testOutputModeNoLogging() {
-        try (var mockAppender = MockLogAppender.capture(HealthPeriodicLogger.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+        try (var mockLog = MockLog.capture(HealthPeriodicLogger.class)) {
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "overall",
                     HealthPeriodicLogger.class.getCanonicalName(),
                     Level.INFO,
                     String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("overall"))
                 )
            );
-            mockAppender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "master_is_stable",
                     HealthPeriodicLogger.class.getCanonicalName(),
                     Level.INFO,
                     String.format(Locale.ROOT, "%s=\"green\"", makeHealthStatusString("master_is_stable"))
                 )
            );
-            mockAppender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "disk",
                     HealthPeriodicLogger.class.getCanonicalName(),
                     Level.INFO,
@@ -721,7 +721,7 @@ public void testOutputModeNoLogging() {
             SchedulerEngine.Event event = new SchedulerEngine.Event(HealthPeriodicLogger.HEALTH_PERIODIC_LOGGER_JOB_NAME, 0, 0);
             testHealthPeriodicLogger.triggered(event);
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java
index 0a944a3bd102f..8dcecca0f65c0 100644
--- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java
+++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java
@@ -41,7 +41,7 @@
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.telemetry.tracing.Tracer;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.rest.FakeRestRequest;
 import org.elasticsearch.threadpool.TestThreadPool;
@@ -581,11 +581,11 @@ public HttpStats stats() {
                 .put(HttpTransportSettings.SETTING_HTTP_TRACE_LOG_EXCLUDE.getKey(), excludeSettings)
                 .build()
         );
-        try (var appender = MockLogAppender.capture(HttpTracer.class)) {
+        try (var mockLog = MockLog.capture(HttpTracer.class)) {
             final String opaqueId = UUIDs.randomBase64UUID(random());
-            appender.addExpectation(
-                new MockLogAppender.PatternSeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.PatternSeenEventExpectation(
                     "received request",
                     HttpTracerTests.HTTP_TRACER_LOGGER,
                     Level.TRACE,
@@ -595,8 +595,8 @@ public HttpStats stats() {
             final boolean badRequest = randomBoolean();
-            appender.addExpectation(
-                new MockLogAppender.PatternSeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.PatternSeenEventExpectation(
                     "sent response",
                     HttpTracerTests.HTTP_TRACER_LOGGER,
                     Level.TRACE,
@@ -610,8 +610,8 @@ public HttpStats stats() {
                 )
             );
-            appender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "received other request",
                     HttpTracerTests.HTTP_TRACER_LOGGER,
                     Level.TRACE,
@@ -657,7 +657,7 @@ public HttpStats stats() {
             try (var httpChannel = fakeRestRequestExcludedPath.getHttpChannel()) {
                 transport.incomingRequest(fakeRestRequestExcludedPath.getHttpRequest(), httpChannel);
             }
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
 }
@@ -671,7 +671,7 @@ public void testLogsSlowInboundProcessing() throws Exception {
             .put(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.getKey(), TimeValue.timeValueMillis(5))
             .build();
         try (
-            var mockAppender = MockLogAppender.capture(AbstractHttpServerTransport.class);
+            var mockLog = MockLog.capture(AbstractHttpServerTransport.class);
             AbstractHttpServerTransport transport = new AbstractHttpServerTransport(
                 settings,
                 networkService,
@@ -718,8 +718,8 @@ public HttpStats stats() {
                 }
             }
         ) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected message",
                     AbstractHttpServerTransport.class.getCanonicalName(),
                     Level.WARN,
@@ -733,7 +733,7 @@ public HttpStats stats() {
                 .build();
             transport.serverAcceptedChannel(fakeRestRequest.getHttpChannel());
             transport.incomingRequest(fakeRestRequest.getHttpRequest(), fakeRestRequest.getHttpChannel());
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
@@ -1350,13 +1350,13 @@ public void addCloseListener(ActionListener listener) {
     private static class LogExpectation implements AutoCloseable {
         private final Logger mockLogger;
-        private final MockLogAppender appender;
+        private final MockLog mockLog;
         private final int grace;
         private LogExpectation(int grace) {
             mockLogger = LogManager.getLogger(AbstractHttpServerTransport.class);
             Loggers.setLevel(mockLogger, Level.DEBUG);
-            appender = MockLogAppender.capture(AbstractHttpServerTransport.class);
+            mockLog = MockLog.capture(AbstractHttpServerTransport.class);
             this.grace = grace;
         }
@@ -1382,9 +1382,9 @@ private LogExpectation timedOut(boolean expected) {
             var logger = AbstractHttpServerTransport.class.getName();
             var level = Level.WARN;
             if (expected) {
-                appender.addExpectation(new MockLogAppender.SeenEventExpectation(name, logger, level, message));
+                mockLog.addExpectation(new MockLog.SeenEventExpectation(name, logger, level, message));
             } else {
-                appender.addExpectation(new MockLogAppender.UnseenEventExpectation(name, logger, level, message));
+                mockLog.addExpectation(new MockLog.UnseenEventExpectation(name, logger, level, message));
             }
             return this;
         }
@@ -1395,9 +1395,9 @@ private LogExpectation wait(boolean expected) {
             var logger = AbstractHttpServerTransport.class.getName();
             var level = Level.DEBUG;
             if (expected) {
-                appender.addExpectation(new MockLogAppender.SeenEventExpectation(name, logger, level, message));
+                mockLog.addExpectation(new MockLog.SeenEventExpectation(name, logger, level, message));
             } else {
-                appender.addExpectation(new MockLogAppender.UnseenEventExpectation(name, logger, level, message));
+                mockLog.addExpectation(new MockLog.UnseenEventExpectation(name, logger, level, message));
             }
             return this;
         }
@@ -1407,17 +1407,17 @@ private LogExpectation update(int connections) {
             var name = "update message";
             var logger = AbstractHttpServerTransport.class.getName();
             var level = Level.INFO;
-            appender.addExpectation(new MockLogAppender.SeenEventExpectation(name, logger, level, message));
+            mockLog.addExpectation(new MockLog.SeenEventExpectation(name, logger, level, message));
             return this;
         }
         public void assertExpectationsMatched() {
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
         @Override
         public void close() {
-            appender.close();
+            mockLog.close();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java
index accc94a9b761c..82eb88a90873f 100644
--- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java
+++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java
@@ -38,7 +38,7 @@
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.telemetry.tracing.Tracer;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.rest.FakeRestRequest;
 import org.elasticsearch.threadpool.TestThreadPool;
@@ -587,9 +587,9 @@ public void sendResponse(HttpResponse response, ActionListener listener) {
             tracer
         );
-        try (var sendingResponseMockLog = MockLogAppender.capture(HttpTracer.class)) {
+        try (var sendingResponseMockLog = MockLog.capture(HttpTracer.class)) {
             sendingResponseMockLog.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+                new MockLog.UnseenEventExpectation(
                     "no response should be logged",
                     HttpTracer.class.getName(),
                     Level.TRACE,
@@ -603,9 +603,9 @@ public void sendResponse(HttpResponse response, ActionListener listener) {
             sendingResponseMockLog.assertAllExpectationsMatched();
         }
-        try (var sendingResponseCompleteMockLog = MockLogAppender.capture(HttpTracer.class)) {
+        try (var sendingResponseCompleteMockLog = MockLog.capture(HttpTracer.class)) {
             sendingResponseCompleteMockLog.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+                new MockLog.SeenEventExpectation(
                     "response should be logged",
                     HttpTracer.class.getName(),
                     Level.TRACE,
@@ -647,9 +647,9 @@ public void sendResponse(HttpResponse response, ActionListener listener) {
             tracer
         );
-        try (var mockLogAppender = MockLogAppender.capture(HttpTracer.class)) {
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(HttpTracer.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "response should be logged with success = false",
                     HttpTracer.class.getName(),
                     Level.TRACE,
@@ -658,7 +658,7 @@ public void sendResponse(HttpResponse response, ActionListener listener) {
             );
             expectThrows(RuntimeException.class, () -> channel.sendResponse(new RestResponse(RestStatus.OK, "ignored")));
-            mockLogAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/http/HttpTracerTests.java b/server/src/test/java/org/elasticsearch/http/HttpTracerTests.java
index 5c9d9ce66a13e..0e41394a9f9a8 100644
--- a/server/src/test/java/org/elasticsearch/http/HttpTracerTests.java
+++ b/server/src/test/java/org/elasticsearch/http/HttpTracerTests.java
@@ -18,7 +18,7 @@
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.rest.FakeRestRequest;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
@@ -36,18 +36,18 @@ public class HttpTracerTests extends ESTestCase {
     @TestLogging(reason = "testing trace logging", value = HTTP_TRACER_LOGGER + ":TRACE," + HTTP_BODY_TRACER_LOGGER + ":INFO")
     public void testLogging() {
-        try (var appender = MockLogAppender.capture(HttpTracer.class)) {
-            appender.addExpectation(
-                new MockLogAppender.PatternSeenEventExpectation(
+        try (var mockLog = MockLog.capture(HttpTracer.class)) {
+            mockLog.addExpectation(
+                new MockLog.PatternSeenEventExpectation(
                     "request log",
                     HTTP_TRACER_LOGGER,
                     Level.TRACE,
                     "\\[\\d+]\\[idHeader]\\[GET]\\[uri] received request from \\[.*] trace.id: 4bf92f3577b34da6a3ce929d0e0e4736"
                 )
            );
-            appender.addExpectation(
-                new MockLogAppender.PatternSeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.PatternSeenEventExpectation(
                     "response log",
                     HTTP_TRACER_LOGGER,
                     Level.TRACE,
@@ -80,7 +80,7 @@ public void testLogging() {
                 true
             );
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
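Editor's note: HttpTracerTests above uses MockLog.PatternSeenEventExpectation, which takes a regular expression where SeenEventExpectation takes a '*' glob. A hedged sketch (the regex content is hypothetical; HTTP_TRACER_LOGGER is the constant from the test above):

    mockLog.addExpectation(
        new MockLog.PatternSeenEventExpectation(
            "request log",
            HttpTracerTests.HTTP_TRACER_LOGGER,
            Level.TRACE,
            "\\[\\d+]\\[GET]\\[/_nodes] received request from \\[.*]"   // regex, not glob
        )
    );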
java.util.concurrent.TimeUnit; @@ -39,7 +39,7 @@ private Exception getRootCause(Exception e) { public void testBeforeIndexShardRecoveryInOrder() throws Exception { var shard = newShard(randomBoolean()); - try (var appender = MockLogAppender.capture(CompositeIndexEventListener.class)) { + try (var mockLog = MockLog.capture(CompositeIndexEventListener.class)) { final var stepNumber = new AtomicInteger(); final var stepCount = between(0, 20); final var failAtStep = new AtomicInteger(-1); @@ -85,8 +85,8 @@ private void runStep() { assertEquals(stepCount, stepNumber.getAndSet(0)); if (stepCount > 0) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "warning", CompositeIndexEventListener.class.getCanonicalName(), Level.WARN, @@ -98,7 +98,7 @@ private void runStep() { final var rootCause = getRootCause(expectThrows(ElasticsearchException.class, beforeIndexShardRecoveryRunner::run)); assertEquals("simulated failure at step " + failAtStep.get(), rootCause.getMessage()); assertEquals(failAtStep.get() + 1, stepNumber.getAndSet(0)); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } finally { @@ -108,7 +108,7 @@ private void runStep() { public void testAfterIndexShardRecoveryInOrder() throws Exception { var shard = newShard(randomBoolean()); - try (var appender = MockLogAppender.capture(CompositeIndexEventListener.class)) { + try (var mockLog = MockLog.capture(CompositeIndexEventListener.class)) { final var stepNumber = new AtomicInteger(); final var stepCount = between(0, 20); final var failAtStep = new AtomicInteger(-1); @@ -147,8 +147,8 @@ private void runStep() { assertEquals(stepCount, stepNumber.getAndSet(0)); if (stepCount > 0) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "warning", CompositeIndexEventListener.class.getCanonicalName(), Level.WARN, @@ -160,7 +160,7 @@ private void runStep() { final var rootCause = getRootCause(expectThrows(ElasticsearchException.class, afterIndexShardRecoveryRunner::run)); assertEquals("simulated failure at step " + failAtStep.get(), rootCause.getMessage()); assertEquals(failAtStep.get() + 1, stepNumber.getAndSet(0)); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } finally { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateRangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateRangeFieldMapperTests.java index 0d971d64a8fe3..c2b1abca6cbc6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateRangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateRangeFieldMapperTests.java @@ -16,8 +16,8 @@ import java.io.IOException; import java.time.Instant; +import java.util.HashMap; import java.util.Map; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; @@ -89,11 +89,15 @@ Object toInput() { Object toExpectedSyntheticSource() { Map expectedInMillis = (Map) super.toExpectedSyntheticSource(); - return expectedInMillis.entrySet() - .stream() - .collect( - Collectors.toMap(Map.Entry::getKey, e -> expectedDateFormatter.format(Instant.ofEpochMilli((long) e.getValue()))) + Map expectedFormatted = new HashMap<>(); + for (var entry : expectedInMillis.entrySet()) { + expectedFormatted.put( + entry.getKey(), + entry.getValue() != null ? 
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateRangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateRangeFieldMapperTests.java
index 0d971d64a8fe3..c2b1abca6cbc6 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DateRangeFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DateRangeFieldMapperTests.java
@@ -16,8 +16,8 @@

 import java.io.IOException;
 import java.time.Instant;
+import java.util.HashMap;
 import java.util.Map;
-import java.util.stream.Collectors;

 import static org.hamcrest.Matchers.containsString;

@@ -89,11 +89,15 @@ Object toInput() {
             Object toExpectedSyntheticSource() {
                 Map expectedInMillis = (Map) super.toExpectedSyntheticSource();

-                return expectedInMillis.entrySet()
-                    .stream()
-                    .collect(
-                        Collectors.toMap(Map.Entry::getKey, e -> expectedDateFormatter.format(Instant.ofEpochMilli((long) e.getValue())))
+                Map expectedFormatted = new HashMap<>();
+                for (var entry : expectedInMillis.entrySet()) {
+                    expectedFormatted.put(
+                        entry.getKey(),
+                        entry.getValue() != null ? expectedDateFormatter.format(Instant.ofEpochMilli((long) entry.getValue())) : null
                     );
+                }
+
+                return expectedFormatted;
             }
         };
     }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleRangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleRangeFieldMapperTests.java
index 07addee5bb532..e94f061400612 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleRangeFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleRangeFieldMapperTests.java
@@ -8,69 +8,12 @@

 package org.elasticsearch.index.mapper;

-import org.elasticsearch.core.CheckedConsumer;
-import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.junit.AssumptionViolatedException;

 import java.io.IOException;
-import java.util.LinkedHashMap;
-import java.util.List;
-
-import static org.hamcrest.Matchers.equalTo;

 public class DoubleRangeFieldMapperTests extends RangeFieldMapperTests {
-
-    public void testSyntheticSourceDefaultValues() throws IOException {
-        // Default range ends for double are negative and positive infinity
-        // and they can not pass `roundTripSyntheticSource` test.
-
-        CheckedConsumer mapping = b -> {
-            b.startObject("field");
-            minimalMapping(b);
-            b.endObject();
-        };
-
-        var inputValues = List.of(
-            (builder, params) -> builder.startObject().field("gte", (Double) null).field("lte", 10).endObject(),
-            (builder, params) -> builder.startObject().field("lte", 20).endObject(),
-            (builder, params) -> builder.startObject().field("gte", 10).field("lte", (Double) null).endObject(),
-            (builder, params) -> builder.startObject().field("gte", 20).endObject(),
-            (ToXContent) (builder, params) -> builder.startObject().endObject()
-        );
-
-        var expected = List.of(new LinkedHashMap<>() {
-            {
-                put("gte", "-Infinity");
-                put("lte", 10.0);
-            }
-        }, new LinkedHashMap<>() {
-            {
-                put("gte", "-Infinity");
-                put("lte", 20.0);
-            }
-        }, new LinkedHashMap<>() {
-            {
-                put("gte", "-Infinity");
-                put("lte", "Infinity");
-            }
-        }, new LinkedHashMap<>() {
-            {
-                put("gte", 10.0);
-                put("lte", "Infinity");
-            }
-        }, new LinkedHashMap<>() {
-            {
-                put("gte", 20.0);
-                put("lte", "Infinity");
-            }
-        });
-
-        var source = getSourceFor(mapping, inputValues);
-        var actual = source.source().get("field");
-        assertThat(actual, equalTo(expected));
-    }
-
     @Override
     protected XContentBuilder rangeSource(XContentBuilder in) throws IOException {
         return rangeSource(in, "0.5", "2.7");
@@ -103,6 +46,13 @@ protected TestRange randomRangeForSyntheticSourceTest() {
         var includeTo = randomBoolean();
         Double to = randomDoubleBetween(from, Double.MAX_VALUE, false);

+        if (rarely()) {
+            from = null;
+        }
+        if (rarely()) {
+            to = null;
+        }
+
         return new TestRange<>(rangeType(), from, to, includeFrom, includeTo);
     }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FloatRangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FloatRangeFieldMapperTests.java
index 81ec49b9bc9f3..62fe603934cf3 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/FloatRangeFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/FloatRangeFieldMapperTests.java
@@ -8,69 +8,12 @@

 package org.elasticsearch.index.mapper;

-import org.elasticsearch.core.CheckedConsumer;
-import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.junit.AssumptionViolatedException;

 import java.io.IOException;
-import java.util.LinkedHashMap;
-import java.util.List;
-
-import static org.hamcrest.Matchers.equalTo;

 public class FloatRangeFieldMapperTests extends RangeFieldMapperTests {
-
-    public void testSyntheticSourceDefaultValues() throws IOException {
-        // Default range ends for float are negative and positive infinity
-        // and they can not pass `roundTripSyntheticSource` test.
-
-        CheckedConsumer mapping = b -> {
-            b.startObject("field");
-            minimalMapping(b);
-            b.endObject();
-        };
-
-        var inputValues = List.of(
-            (builder, params) -> builder.startObject().field("gte", (Float) null).field("lte", 10).endObject(),
-            (builder, params) -> builder.startObject().field("lte", 20).endObject(),
-            (builder, params) -> builder.startObject().field("gte", 10).field("lte", (Float) null).endObject(),
-            (builder, params) -> builder.startObject().field("gte", 20).endObject(),
-            (ToXContent) (builder, params) -> builder.startObject().endObject()
-        );
-
-        var expected = List.of(new LinkedHashMap<>() {
-            {
-                put("gte", "-Infinity");
-                put("lte", 10.0);
-            }
-        }, new LinkedHashMap<>() {
-            {
-                put("gte", "-Infinity");
-                put("lte", 20.0);
-            }
-        }, new LinkedHashMap<>() {
-            {
-                put("gte", "-Infinity");
-                put("lte", "Infinity");
-            }
-        }, new LinkedHashMap<>() {
-            {
-                put("gte", 10.0);
-                put("lte", "Infinity");
-            }
-        }, new LinkedHashMap<>() {
-            {
-                put("gte", 20.0);
-                put("lte", "Infinity");
-            }
-        });
-
-        var source = getSourceFor(mapping, inputValues);
-        var actual = source.source().get("field");
-        assertThat(actual, equalTo(expected));
-    }
-
     @Override
     protected XContentBuilder rangeSource(XContentBuilder in) throws IOException {
         return rangeSource(in, "0.5", "2.7");
@@ -103,6 +46,13 @@ protected TestRange randomRangeForSyntheticSourceTest() {
         var includeTo = randomBoolean();
         Float to = (float) randomDoubleBetween(from + Math.ulp(from), Float.MAX_VALUE, true);

+        if (rarely()) {
+            from = null;
+        }
+        if (rarely()) {
+            to = null;
+        }
+
        return new TestRange<>(rangeType(), from, to, includeFrom, includeTo);
     }
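Both the float and double tests drop their hand-written testSyntheticSourceDefaultValues and instead let randomRangeForSyntheticSourceTest occasionally produce open-ended bounds. The rule those randomized runs are checked against (spelled out in RangeFieldMapperTests further down) can be restated in a few lines; this is a simplified sketch for the double case with ad-hoc names, not the test framework's actual helper:

    // Simplified restatement of the expected synthetic source for the "gte" bound of
    // a double range: an absent or minimal inclusive bound round-trips as null, while
    // an exclusive bound is normalized to the next representable value.
    static Object expectedGte(boolean includeFrom, Double from) {
        double min = Double.NEGATIVE_INFINITY; // default lower bound for double ranges
        if (includeFrom) {
            return (from == null || from == min) ? null : from;
        }
        return Math.nextUp(from != null ? from : min);
    }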
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java
index dbd6cd7c58105..0bfa04a95f1f5 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java
@@ -113,15 +113,16 @@ public void testValidSyntheticSource() throws IOException {
         // So this assert needs to be not sensitive to order and in "reference"
         // implementation of tests from MapperTestCase it is.
         var actual = source.source().get("field");
-        if (inputValues.size() == 1) {
+        var expected = new HashSet<>(expectedValues);
+        if (expected.size() == 1) {
             assertEquals(expectedValues.get(0), actual);
         } else {
             assertThat(actual, instanceOf(List.class));
-            assertTrue(((List) actual).containsAll(new HashSet<>(expectedValues)));
+            assertTrue(((List) actual).containsAll(expected));
         }
     }

-    private Tuple generateValue() {
+    private Tuple> generateValue() {
         String cidr = randomCidrBlock();
         InetAddresses.IpRange range = InetAddresses.parseIpRangeFromCidr(cidr);

@@ -134,27 +135,73 @@ private Tuple generateValue() {
         if (randomBoolean()) {
             // CIDRs are always inclusive ranges.
             input = cidr;
-            output.put("gte", InetAddresses.toAddrString(range.lowerBound()));
-            output.put("lte", InetAddresses.toAddrString(range.upperBound()));
+
+            var from = InetAddresses.toAddrString(range.lowerBound());
+            inclusiveFrom(output, from);
+
+            var to = InetAddresses.toAddrString(range.upperBound());
+            inclusiveTo(output, to);
         } else {
             var fromKey = includeFrom ? "gte" : "gt";
             var toKey = includeTo ? "lte" : "lt";
             var from = rarely() ? null : InetAddresses.toAddrString(range.lowerBound());
             var to = rarely() ? null : InetAddresses.toAddrString(range.upperBound());
-            input = (ToXContent) (builder, params) -> builder.startObject().field(fromKey, from).field(toKey, to).endObject();
-
-            var rawFrom = from != null ? range.lowerBound() : (InetAddress) rangeType().minValue();
-            var adjustedFrom = includeFrom ? rawFrom : (InetAddress) rangeType().nextUp(rawFrom);
-            output.put("gte", InetAddresses.toAddrString(adjustedFrom));
+            input = (ToXContent) (builder, params) -> {
+                builder.startObject();
+                if (includeFrom && from == null && randomBoolean()) {
+                    // skip field entirely since it is equivalent to a default value
+                } else {
+                    builder.field(fromKey, from);
+                }
+
+                if (includeTo && to == null && randomBoolean()) {
+                    // skip field entirely since it is equivalent to a default value
+                } else {
+                    builder.field(toKey, to);
+                }
+
+                return builder.endObject();
+            };
+
+            if (includeFrom) {
+                inclusiveFrom(output, from);
+            } else {
+                var fromWithDefaults = from != null ? range.lowerBound() : (InetAddress) rangeType().minValue();
+                var adjustedFrom = (InetAddress) rangeType().nextUp(fromWithDefaults);
+                output.put("gte", InetAddresses.toAddrString(adjustedFrom));
+            }

-            var rawTo = to != null ? range.upperBound() : (InetAddress) rangeType().maxValue();
-            var adjustedTo = includeTo ? rawTo : (InetAddress) rangeType().nextDown(rawTo);
-            output.put("lte", InetAddresses.toAddrString(adjustedTo));
+            if (includeTo) {
+                inclusiveTo(output, to);
+            } else {
+                var toWithDefaults = to != null ? range.upperBound() : (InetAddress) rangeType().maxValue();
+                var adjustedTo = (InetAddress) rangeType().nextDown(toWithDefaults);
+                output.put("lte", InetAddresses.toAddrString(adjustedTo));
+            }
         }

         return Tuple.tuple(input, output);
     }

+    private void inclusiveFrom(Map output, String from) {
+        // This is helpful since different representations can map to "::"
+        var normalizedMin = InetAddresses.toAddrString((InetAddress) rangeType().minValue());
+        if (from != null && from.equals(normalizedMin) == false) {
+            output.put("gte", from);
+        } else {
+            output.put("gte", null);
+        }
+    }
+
+    private void inclusiveTo(Map output, String to) {
+        var normalizedMax = InetAddresses.toAddrString((InetAddress) rangeType().maxValue());
+        if (to != null && to.equals(normalizedMax) == false) {
+            output.put("lte", to);
+        } else {
+            output.put("lte", null);
+        }
+    }
+
     public void testInvalidSyntheticSource() {
         Exception e = expectThrows(IllegalArgumentException.class, () -> createDocumentMapper(syntheticSourceMapping(b -> {
             b.startObject("field");
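The inclusiveFrom/inclusiveTo helpers compare the rendered address against the normalized form of the type's minimum or maximum because several textual spellings denote the same IP. A small illustration of why the comparison has to go through toAddrString; InetAddresses here is assumed to be org.elasticsearch.common.network.InetAddresses, the same class these tests use, with forString as its parse counterpart:

    // Distinct spellings collapse to one canonical form, so a raw string comparison
    // against "::" would miss equivalent inputs.
    InetAddress zero = InetAddresses.forString("0:0:0:0:0:0:0:0");
    assert InetAddresses.toAddrString(zero).equals("::"); // canonical IPv6 minimum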
"lte" : "lt"; - return (ToXContent) (builder, params) -> builder.startObject().field(fromKey, from).field(toKey, to).endObject(); + return (ToXContent) (builder, params) -> { + builder.startObject(); + if (includeFrom && from == null && randomBoolean()) { + // skip field entirely since it is equivalent to a default value + } else { + builder.field(fromKey, from); + } + + if (includeTo && to == null && randomBoolean()) { + // skip field entirely since it is equivalent to a default value + } else { + builder.field(toKey, to); + } + + return builder.endObject(); + }; } Object toExpectedSyntheticSource() { @@ -339,17 +354,25 @@ Object toExpectedSyntheticSource() { // Also, "to" field always comes first. Map output = new LinkedHashMap<>(); - var fromWithDefaults = from != null ? from : rangeType().minValue(); if (includeFrom) { - output.put("gte", fromWithDefaults); + if (from == null || from == rangeType().minValue()) { + output.put("gte", null); + } else { + output.put("gte", from); + } } else { + var fromWithDefaults = from != null ? from : rangeType().minValue(); output.put("gte", type.nextUp(fromWithDefaults)); } - var toWithDefaults = to != null ? to : rangeType().maxValue(); if (includeTo) { - output.put("lte", toWithDefaults); + if (to == null || to == rangeType().maxValue()) { + output.put("lte", null); + } else { + output.put("lte", to); + } } else { + var toWithDefaults = to != null ? to : rangeType().maxValue(); output.put("lte", type.nextDown(toWithDefaults)); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index e8e09c51dfb4f..1813f2a2e1c03 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -124,7 +124,7 @@ import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.FieldMaskingReader; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.store.MockFSDirectoryFactory; @@ -3527,9 +3527,9 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO EMPTY_EVENT_LISTENER ); - try (var appender = MockLogAppender.capture(IndexShard.class)) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(IndexShard.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "expensive checks warning", "org.elasticsearch.index.shard.IndexShard", Level.WARN, @@ -3538,8 +3538,8 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO ) ); - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "failure message", "org.elasticsearch.index.shard.IndexShard", Level.WARN, @@ -3557,7 +3557,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO containsString("Recovery failed") ); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } // check that corrupt marker is there @@ -4061,7 +4061,7 @@ public void testFlushTimeExcludingWaiting() throws Exception { @TestLogging(reason = "testing traces of concurrent flushes", value = "org.elasticsearch.index.engine.Engine:TRACE") public void 
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index e8e09c51dfb4f..1813f2a2e1c03 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -124,7 +124,7 @@
 import org.elasticsearch.test.CorruptionUtils;
 import org.elasticsearch.test.DummyShardLock;
 import org.elasticsearch.test.FieldMaskingReader;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestIssueLogging;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.store.MockFSDirectoryFactory;
@@ -3527,9 +3527,9 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO
             EMPTY_EVENT_LISTENER
         );

-        try (var appender = MockLogAppender.capture(IndexShard.class)) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(IndexShard.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expensive checks warning",
                     "org.elasticsearch.index.shard.IndexShard",
                     Level.WARN,
@@ -3538,8 +3538,8 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO
                 )
             );

-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "failure message",
                     "org.elasticsearch.index.shard.IndexShard",
                     Level.WARN,
@@ -3557,7 +3557,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO
                 containsString("Recovery failed")
             );

-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }

         // check that corrupt marker is there
@@ -4061,7 +4061,7 @@ public void testFlushTimeExcludingWaiting() throws Exception {

     @TestLogging(reason = "testing traces of concurrent flushes", value = "org.elasticsearch.index.engine.Engine:TRACE")
     public void testFlushOnIdleConcurrentFlushDoesNotWait() throws Exception {
-        try (var mockLogAppender = MockLogAppender.capture(Engine.class)) {
+        try (var mockLog = MockLog.capture(Engine.class)) {
             CountDownLatch readyToCompleteFlushLatch = new CountDownLatch(1);
             IndexShard shard = newStartedShard(false, Settings.EMPTY, config -> new InternalEngine(config) {
                 @Override
@@ -4077,8 +4077,8 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl

             // Issue the first flushOnIdle request. The flush happens in the background using the flush threadpool.
             // Then wait for log message that flush acquired lock immediately
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "should see first flush getting lock immediately",
                     Engine.class.getCanonicalName(),
                     Level.TRACE,
@@ -4087,14 +4087,14 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl
             );
             shard.flushOnIdle(0);
             assertFalse(shard.isActive());
-            assertBusy(mockLogAppender::assertAllExpectationsMatched);
+            assertBusy(mockLog::assertAllExpectationsMatched);

             // While the first flush is happening, index one more doc (to turn the shard's active flag to true),
             // and issue a second flushOnIdle request which should not wait for the ongoing flush
             indexDoc(shard, "_doc", Integer.toString(3));
             assertTrue(shard.isActive());
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "should see second flush returning since it will not wait for the ongoing flush",
                     Engine.class.getCanonicalName(),
                     Level.TRACE,
@@ -4102,7 +4102,7 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl
                 )
             );
             shard.flushOnIdle(0);
-            assertBusy(mockLogAppender::assertAllExpectationsMatched);
+            assertBusy(mockLog::assertAllExpectationsMatched);

             // A direct call to flush (with waitIfOngoing=false) should not wait and return false immediately
             assertFalse(shard.flush(new FlushRequest().waitIfOngoing(false).force(false)));
@@ -4111,15 +4111,15 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl
             readyToCompleteFlushLatch.countDown();

             // Wait for first flushOnIdle to log a message that it released the flush lock
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "should see first flush releasing lock",
                     Engine.class.getCanonicalName(),
                     Level.TRACE,
                     "released flush lock"
                 )
             );
-            assertBusy(mockLogAppender::assertAllExpectationsMatched);
+            assertBusy(mockLog::assertAllExpectationsMatched);

             // The second flushOnIdle (that did not happen) should have turned the active flag to true
             assertTrue(shard.isActive());
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java
index 786534983b2cc..11e670e3ad127 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java
@@ -18,7 +18,7 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Releasable;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;

 import java.util.ArrayList;
 import java.util.HashSet;
@@ -43,9 +43,9 @@
 import static org.elasticsearch.indices.recovery.RecoverySettings.NODE_BANDWIDTH_RECOVERY_SETTINGS;
 import static org.elasticsearch.indices.recovery.RecoverySettings.TOTAL_PHYSICAL_MEMORY_OVERRIDING_TEST_SETTING;
 import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING;
-import static org.elasticsearch.test.MockLogAppender.LoggingExpectation;
-import static org.elasticsearch.test.MockLogAppender.SeenEventExpectation;
-import static org.elasticsearch.test.MockLogAppender.assertThatLogger;
+import static org.elasticsearch.test.MockLog.LoggingExpectation;
+import static org.elasticsearch.test.MockLog.SeenEventExpectation;
+import static org.elasticsearch.test.MockLog.assertThatLogger;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
@@ -490,9 +490,9 @@ public void testRecoverFromSnapshotPermitsAreNotLeakedWhenRecoverFromSnapshotIsD
         final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);

-        try (var mockAppender = MockLogAppender.capture(RecoverySettings.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation("no warnings", RecoverySettings.class.getCanonicalName(), Level.WARN, "*")
+        try (var mockLog = MockLog.capture(RecoverySettings.class)) {
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation("no warnings", RecoverySettings.class.getCanonicalName(), Level.WARN, "*")
             );

             assertThat(recoverySettings.getUseSnapshotsDuringRecovery(), is(false));
@@ -507,7 +507,7 @@ public void testRecoverFromSnapshotPermitsAreNotLeakedWhenRecoverFromSnapshotIsD
             assertThat(releasable, is(notNullValue()));
             releasable.close();

-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java
index 39d9c6e4e3ad5..b2b19f14cfd4b 100644
--- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java
@@ -60,7 +60,7 @@
 import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.script.TemplateScript;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
@@ -758,10 +758,10 @@ public void testPutWithErrorResponse() throws IllegalAccessException {
             XContentType.JSON
         );
         ClusterState clusterState = executePut(putRequest, previousClusterState);
-        MockLogAppender.assertThatLogger(
+        MockLog.assertThatLogger(
             () -> ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)),
             IngestService.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "test1",
                 IngestService.class.getCanonicalName(),
                 Level.WARN,
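IngestServiceTests above uses the one-shot MockLog.assertThatLogger helper, which wraps the capture/assert cycle around a single action; with the static imports introduced in RecoverySettingsTests it reads even tighter. A sketch of the same shape with hypothetical names (MyComponent and the message are placeholders, not from the diff):

    // One-shot form: capture, run the action, and assert the expectations in one call.
    MockLog.assertThatLogger(
        () -> new MyComponent().applyChange(),      // action expected to log
        MyComponent.class,                          // class whose logger is captured
        new MockLog.SeenEventExpectation(
            "change is logged",
            MyComponent.class.getCanonicalName(),
            Level.WARN,
            "*failed to apply change*"
        )
    );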
diff --git a/server/src/test/java/org/elasticsearch/monitor/fs/FsHealthServiceTests.java b/server/src/test/java/org/elasticsearch/monitor/fs/FsHealthServiceTests.java
index f85201efe4817..c42d8c42836bf 100644
--- a/server/src/test/java/org/elasticsearch/monitor/fs/FsHealthServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/monitor/fs/FsHealthServiceTests.java
@@ -18,7 +18,7 @@
 import org.elasticsearch.core.PathUtilsForTesting;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -127,12 +127,12 @@ public void testLoggingOnHungIO() throws Exception {
         PathUtilsForTesting.installMock(fileSystem);
         final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);

-        try (NodeEnvironment env = newNodeEnvironment(); var mockAppender = MockLogAppender.capture(FsHealthService.class)) {
+        try (NodeEnvironment env = newNodeEnvironment(); var mockLog = MockLog.capture(FsHealthService.class)) {
             FsHealthService fsHealthService = new FsHealthService(settings, clusterSettings, testThreadPool, env);
             int counter = 0;
             for (Path path : env.nodeDataPaths()) {
-                mockAppender.addExpectation(
-                    new MockLogAppender.SeenEventExpectation(
+                mockLog.addExpectation(
+                    new MockLog.SeenEventExpectation(
                         "test" + ++counter,
                         FsHealthService.class.getCanonicalName(),
                         Level.WARN,
@@ -145,7 +145,7 @@ public void testLoggingOnHungIO() throws Exception {
             disruptFileSystemProvider.injectIOException.set(true);
             fsHealthService.new FsHealthMonitor().run();
             assertEquals(env.nodeDataPaths().length, disruptFileSystemProvider.getInjectedPathCount());
-            assertBusy(mockAppender::assertAllExpectationsMatched);
+            assertBusy(mockLog::assertAllExpectationsMatched);
         } finally {
             PathUtilsForTesting.teardown();
             ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS);
diff --git a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java
index d8e4eec3b5540..6bc58cc37a314 100644
--- a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java
@@ -33,7 +33,7 @@
 import org.elasticsearch.http.HttpStats;
 import org.elasticsearch.reservedstate.service.FileSettingsService;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.readiness.ReadinessClientProbe;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -266,9 +266,9 @@ public void testStatusChange() throws Exception {
             )
             .build();
         event = new ClusterChangedEvent("test", nodeShuttingDownState, completeState);
-        try (var mockAppender = MockLogAppender.capture(ReadinessService.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(ReadinessService.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "node shutting down logged",
                     ReadinessService.class.getCanonicalName(),
                     Level.INFO,
@@ -276,10 +276,10 @@ public void testStatusChange() throws Exception {
                 )
             );
             readinessService.clusterChanged(event);
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

-            mockAppender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation(
                     "node shutting down not logged twice",
                     ReadinessService.class.getCanonicalName(),
                     Level.INFO,
@@ -287,7 +287,7 @@ public void testStatusChange() throws Exception {
                 )
             );
             readinessService.clusterChanged(event);
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
         assertFalse(readinessService.ready());
         tcpReadinessProbeFalse(readinessService);
diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java
index bab67233f0025..59acb227385f6 100644
--- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java
+++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java
@@ -9,6 +9,10 @@
 package org.elasticsearch.search;

 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
@@ -38,7 +42,9 @@
 import org.elasticsearch.index.cache.query.QueryCache;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.fielddata.plain.BinaryIndexFieldData;
+import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData;
 import org.elasticsearch.index.fielddata.plain.SortedOrdinalsIndexFieldData;
 import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -799,6 +805,58 @@ public void testGetFieldCardinality() throws IOException {
         }
     }

+    public void testGetFieldCardinalityNumeric() throws IOException {
+        try (BaseDirectoryWrapper dir = newDirectory()) {
+            final int numDocs = scaledRandomIntBetween(100, 200);
+            try (RandomIndexWriter w = new RandomIndexWriter(random(), dir, new IndexWriterConfig())) {
+                for (int i = 0; i < numDocs; ++i) {
+                    Document doc = new Document();
+                    doc.add(new LongField("long", i, Field.Store.NO));
+                    doc.add(new IntField("int", i, Field.Store.NO));
+                    doc.add(new SortedNumericDocValuesField("no_index", i));
+                    w.addDocument(doc);
+                }
+            }
+            try (DirectoryReader reader = DirectoryReader.open(dir)) {
+                final SortedNumericIndexFieldData longFieldData = new SortedNumericIndexFieldData(
+                    "long",
+                    IndexNumericFieldData.NumericType.LONG,
+                    IndexNumericFieldData.NumericType.LONG.getValuesSourceType(),
+                    null,
+                    true
+                );
+                assertEquals(numDocs, DefaultSearchContext.getFieldCardinality(longFieldData, reader));
+
+                final SortedNumericIndexFieldData integerFieldData = new SortedNumericIndexFieldData(
+                    "int",
+                    IndexNumericFieldData.NumericType.INT,
+                    IndexNumericFieldData.NumericType.INT.getValuesSourceType(),
+                    null,
+                    true
+                );
+                assertEquals(numDocs, DefaultSearchContext.getFieldCardinality(integerFieldData, reader));
+
+                final SortedNumericIndexFieldData shortFieldData = new SortedNumericIndexFieldData(
+                    "int",
+                    IndexNumericFieldData.NumericType.SHORT,
+                    IndexNumericFieldData.NumericType.SHORT.getValuesSourceType(),
+                    null,
+                    true
+                );
+                assertEquals(numDocs, DefaultSearchContext.getFieldCardinality(shortFieldData, reader));
+
+                final SortedNumericIndexFieldData noIndexFieldata = new SortedNumericIndexFieldData(
+                    "no_index",
+                    IndexNumericFieldData.NumericType.LONG,
+                    IndexNumericFieldData.NumericType.LONG.getValuesSourceType(),
+                    null,
+                    false
+                );
+                assertEquals(-1, DefaultSearchContext.getFieldCardinality(noIndexFieldata, reader));
+            }
+        }
+    }
+
     public void testGetFieldCardinalityUnmappedField() {
         MapperService mapperService = mock(MapperService.class);
         IndexService indexService = mock(IndexService.class);
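The new test leans on the fact that LongField and IntField in recent Lucene versions index points alongside doc values, so the reader carries countable per-field statistics, while a bare SortedNumericDocValuesField leaves nothing indexed to count, hence the -1 (unknown) result for the "no_index" field. The distinction in isolation, using only the Lucene field types the test itself imports:

    // Indexed numeric fields carry point statistics the reader can count;
    // a doc-values-only field does not, which is what the -1 branch exercises.
    Document doc = new Document();
    doc.add(new LongField("long", 42L, Field.Store.NO));       // points + doc values
    doc.add(new SortedNumericDocValuesField("no_index", 42L)); // doc values only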
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java
index 95661fd24c49e..5df15577cd050 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java
@@ -81,6 +81,51 @@ public void testDoubles() throws Exception {
         }
     }

+    public void testFloats() throws Exception {
+        RangeType rangeType = RangeType.FLOAT;
+        try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
+                new RangeFieldMapper.Range(rangeType, 1.0f, 5.0f, true, true), // bucket 0 5
+                new RangeFieldMapper.Range(rangeType, -3.1f, 4.2f, true, true), // bucket -5, 0
+                new RangeFieldMapper.Range(rangeType, 4.2f, 13.3f, true, true), // bucket 0, 5, 10
+                new RangeFieldMapper.Range(rangeType, 22.5f, 29.3f, true, true), // bucket 20, 25
+            }) {
+                Document doc = new Document();
+                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
+                doc.add(new BinaryDocValuesField("field", encodedRange));
+                w.addDocument(doc);
+            }
+
+            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg").field("field").interval(5);
+
+            try (IndexReader reader = w.getReader()) {
+                InternalHistogram histogram = searchAndReduce(reader, new AggTestConfig(aggBuilder, rangeField("field", rangeType)));
+                assertEquals(7, histogram.getBuckets().size());
+
+                assertEquals(-5d, histogram.getBuckets().get(0).getKey());
+                assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+
+                assertEquals(0d, histogram.getBuckets().get(1).getKey());
+                assertEquals(3, histogram.getBuckets().get(1).getDocCount());
+
+                assertEquals(5d, histogram.getBuckets().get(2).getKey());
+                assertEquals(2, histogram.getBuckets().get(2).getDocCount());
+
+                assertEquals(10d, histogram.getBuckets().get(3).getKey());
+                assertEquals(1, histogram.getBuckets().get(3).getDocCount());
+
+                assertEquals(15d, histogram.getBuckets().get(4).getKey());
+                assertEquals(0, histogram.getBuckets().get(4).getDocCount());
+
+                assertEquals(20d, histogram.getBuckets().get(5).getKey());
+                assertEquals(1, histogram.getBuckets().get(5).getDocCount());
+
+                assertEquals(25d, histogram.getBuckets().get(6).getKey());
+                assertEquals(1, histogram.getBuckets().get(6).getDocCount());
+            }
+        }
+    }
+
     public void testLongs() throws Exception {
         RangeType rangeType = RangeType.LONG;
         try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
@@ -126,6 +171,51 @@ public void testLongs() throws Exception {
         }
     }

+    public void testInts() throws Exception {
+        RangeType rangeType = RangeType.INTEGER;
+        try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
+                new RangeFieldMapper.Range(rangeType, 1, 5, true, true), // bucket 0 5
+                new RangeFieldMapper.Range(rangeType, -3, 4, true, true), // bucket -5, 0
+                new RangeFieldMapper.Range(rangeType, 4, 13, true, true), // bucket 0, 5, 10
+                new RangeFieldMapper.Range(rangeType, 22, 29, true, true), // bucket 20, 25
+            }) {
+                Document doc = new Document();
+                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
+                doc.add(new BinaryDocValuesField("field", encodedRange));
+                w.addDocument(doc);
+            }
+
+            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg").field("field").interval(5);
+
+            try (IndexReader reader = w.getReader()) {
+                InternalHistogram histogram = searchAndReduce(reader, new AggTestConfig(aggBuilder, rangeField("field", rangeType)));
+                assertEquals(7, histogram.getBuckets().size());
+
+                assertEquals(-5d, histogram.getBuckets().get(0).getKey());
+                assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+
+                assertEquals(0d, histogram.getBuckets().get(1).getKey());
+                assertEquals(3, histogram.getBuckets().get(1).getDocCount());
+
+                assertEquals(5d, histogram.getBuckets().get(2).getKey());
+                assertEquals(2, histogram.getBuckets().get(2).getDocCount());
+
+                assertEquals(10d, histogram.getBuckets().get(3).getKey());
+                assertEquals(1, histogram.getBuckets().get(3).getDocCount());
+
+                assertEquals(15d, histogram.getBuckets().get(4).getKey());
+                assertEquals(0, histogram.getBuckets().get(4).getDocCount());
+
+                assertEquals(20d, histogram.getBuckets().get(5).getKey());
+                assertEquals(1, histogram.getBuckets().get(5).getDocCount());
+
+                assertEquals(25d, histogram.getBuckets().get(6).getKey());
+                assertEquals(1, histogram.getBuckets().get(6).getDocCount());
+            }
+        }
+    }
+
     public void testMultipleRanges() throws Exception {
         RangeType rangeType = RangeType.LONG;
         try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
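The bucket comments in both new tests follow from the histogram's key arithmetic: with interval 5, a range contributes one document to every bucket whose key lies between floor(from/5)*5 and floor(to/5)*5 inclusive. A standalone sketch of that derivation (an ad-hoc helper using java.util.List/ArrayList, not aggregator code):

    // For interval 5, the range [-3, 4] spans keys floor(-3/5.0)*5 = -5 through
    // floor(4/5.0)*5 = 0, matching the "bucket -5, 0" comment above.
    static List<Long> expectedBucketKeys(double from, double to, long interval) {
        long first = (long) Math.floor(from / interval) * interval;
        long last = (long) Math.floor(to / interval) * interval;
        List<Long> keys = new ArrayList<>();
        for (long k = first; k <= last; k += interval) {
            keys.add(k);
        }
        return keys; // e.g. [-5, 0] for from=-3, to=4
    }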
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
index d1b9a9e4b7e82..fa700dc5d78f7 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
@@ -182,7 +182,7 @@
 import org.elasticsearch.telemetry.tracing.Tracer;
 import org.elasticsearch.test.ClusterServiceUtils;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.BytesRefRecycler;
@@ -1451,13 +1451,13 @@ public void onFailure(Exception e) {
             })
         );

-        MockLogAppender.assertThatLogger(() -> {
+        MockLog.assertThatLogger(() -> {
             deterministicTaskQueue.runAllRunnableTasks();
             assertTrue("executed all runnable tasks but test steps are still incomplete", testListener.isDone());
             safeAwait(testListener); // shouldn't throw
         },
             SnapshotsService.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "INFO log",
                 SnapshotsService.class.getCanonicalName(),
                 Level.INFO,
@@ -1502,13 +1502,13 @@ public void onFailure(Exception e) {
             })
         );

-        MockLogAppender.assertThatLogger(() -> {
+        MockLog.assertThatLogger(() -> {
             deterministicTaskQueue.runAllRunnableTasks();
             assertTrue("executed all runnable tasks but test steps are still incomplete", testListener.isDone());
             safeAwait(testListener); // shouldn't throw
         },
             SnapshotsService.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "INFO log",
                 SnapshotsService.class.getCanonicalName(),
                 Level.INFO,
@@ -1555,13 +1555,13 @@ public void onFailure(Exception e) {
             })
         );

-        MockLogAppender.assertThatLogger(() -> {
+        MockLog.assertThatLogger(() -> {
             deterministicTaskQueue.runAllRunnableTasks();
             assertTrue("executed all runnable tasks but test steps are still incomplete", testListener.isDone());
             safeAwait(testListener); // shouldn't throw
         },
             SnapshotsService.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "INFO log",
                 SnapshotsService.class.getCanonicalName(),
                 Level.INFO,
@@ -1607,13 +1607,13 @@ public void onFailure(Exception e) {
             })
         );

-        MockLogAppender.assertThatLogger(() -> {
+        MockLog.assertThatLogger(() -> {
             deterministicTaskQueue.runAllRunnableTasks();
             assertTrue("executed all runnable tasks but test steps are still incomplete", testListener.isDone());
             safeAwait(testListener); // shouldn't throw
         },
             SnapshotsService.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "INFO log",
                 SnapshotsService.class.getCanonicalName(),
                 Level.INFO,
diff --git a/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java b/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java
index 359758de03625..c2dfd70c22bd9 100644
--- a/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java
+++ b/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java
@@ -18,7 +18,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.test.transport.StubbableTransport;
@@ -55,13 +55,13 @@ public void testLogsAtDebugOnDisconnectionDuringBan() throws Exception {
                 connection.sendRequest(requestId, action, request, options);
             },
             childNode -> List.of(
-                new MockLogAppender.SeenEventExpectation(
+                new MockLog.SeenEventExpectation(
                     "cannot send ban",
                     TaskCancellationService.class.getName(),
                     Level.DEBUG,
                     "*cannot send ban for tasks*" + childNode.getId() + "*"
                 ),
-                new MockLogAppender.SeenEventExpectation(
+                new MockLog.SeenEventExpectation(
                     "cannot remove ban",
                     TaskCancellationService.class.getName(),
                     Level.DEBUG,
@@ -81,13 +81,13 @@ public void testLogsAtDebugOnDisconnectionDuringBanRemoval() throws Exception {
                 connection.sendRequest(requestId, action, request, options);
             },
             childNode -> List.of(
-                new MockLogAppender.UnseenEventExpectation(
+                new MockLog.UnseenEventExpectation(
                     "cannot send ban",
                     TaskCancellationService.class.getName(),
                     Level.DEBUG,
                     "*cannot send ban for tasks*" + childNode.getId() + "*"
                 ),
-                new MockLogAppender.SeenEventExpectation(
+                new MockLog.SeenEventExpectation(
                     "cannot remove ban",
                     TaskCancellationService.class.getName(),
                     Level.DEBUG,
@@ -99,7 +99,7 @@ public void testLogsAtDebugOnDisconnectionDuringBanRemoval() throws Exception {

     private void runTest(
         StubbableTransport.SendRequestBehavior sendRequestBehavior,
-        Function> expectations
+        Function> expectations
     ) throws Exception {
         final ArrayList resources = new ArrayList<>(3);

@@ -169,9 +169,9 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId,
             new ChildResponseHandler(() -> parentTransportService.getTaskManager().unregister(parentTask))
         );

-        try (MockLogAppender appender = MockLogAppender.capture(TaskCancellationService.class)) {
-            for (MockLogAppender.LoggingExpectation expectation : expectations.apply(childTransportService.getLocalDiscoNode())) {
-                appender.addExpectation(expectation);
+        try (MockLog mockLog = MockLog.capture(TaskCancellationService.class)) {
+            for (MockLog.LoggingExpectation expectation : expectations.apply(childTransportService.getLocalDiscoNode())) {
+                mockLog.addExpectation(expectation);
             }

             final PlainActionFuture cancellationFuture = new PlainActionFuture<>();
@@ -183,7 +183,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId,
             }

             // assert busy since failure to remove a ban may be logged after cancellation completed
-            assertBusy(appender::assertAllExpectationsMatched);
+            assertBusy(mockLog::assertAllExpectationsMatched);
         }

         assertTrue("child tasks did not finish in time", childTaskLock.tryLock(15, TimeUnit.SECONDS));
diff --git a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java
index 094bd94aeca19..b19f058d2c6c6 100644
--- a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java
+++ b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java
@@ -20,7 +20,7 @@
 import org.elasticsearch.common.util.concurrent.TaskExecutionTimeTrackingEsThreadPoolExecutor;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;

 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
@@ -106,17 +106,17 @@ public void testLateTimeIntervalWarningMuchLongerThanEstimatedTimeIntervalByDefa
     }

     public void testTimerThreadWarningLogging() throws Exception {
-        try (var appender = MockLogAppender.capture(ThreadPool.class)) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(ThreadPool.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected warning for absolute clock",
                     ThreadPool.class.getName(),
                     Level.WARN,
                     "timer thread slept for [*] on absolute clock which is above the warn threshold of [100ms]"
                 )
             );
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected warning for relative clock",
                     ThreadPool.class.getName(),
                     Level.WARN,
@@ -127,7 +127,7 @@ public void testTimerThreadWarningLogging() throws Exception {

             final ThreadPool.CachedTimeThread thread = new ThreadPool.CachedTimeThread("[timer]", 200, 100);
             thread.start();
-            assertBusy(appender::assertAllExpectationsMatched);
+            assertBusy(mockLog::assertAllExpectationsMatched);

             thread.interrupt();
             thread.join();
@@ -135,14 +135,14 @@ public void testTimerThreadWarningLogging() throws Exception {
     }

     public void testTimeChangeChecker() throws Exception {
-        try (var appender = MockLogAppender.capture(ThreadPool.class)) {
+        try (var mockLog = MockLog.capture(ThreadPool.class)) {
             long absoluteMillis = randomLong(); // overflow should still be handled correctly
             long relativeNanos = randomLong(); // overflow should still be handled correctly

             final ThreadPool.TimeChangeChecker timeChangeChecker = new ThreadPool.TimeChangeChecker(100, absoluteMillis, relativeNanos);

-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected warning for absolute clock",
                     ThreadPool.class.getName(),
                     Level.WARN,
@@ -152,10 +152,10 @@ public void testTimeChangeChecker() throws Exception {

             absoluteMillis += TimeValue.timeValueSeconds(2).millis();
             timeChangeChecker.check(absoluteMillis, relativeNanos);
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected warning for relative clock",
                     ThreadPool.class.getName(),
                     Level.WARN,
@@ -165,10 +165,10 @@ public void testTimeChangeChecker() throws Exception {

             relativeNanos += TimeValue.timeValueSeconds(3).nanos();
             timeChangeChecker.check(absoluteMillis, relativeNanos);
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected warning for absolute clock",
                     ThreadPool.class.getName(),
                     Level.WARN,
@@ -178,10 +178,10 @@ public void testTimeChangeChecker() throws Exception {

             absoluteMillis -= 1;
             timeChangeChecker.check(absoluteMillis, relativeNanos);
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected warning for relative clock",
                     ThreadPool.class.getName(),
                     Level.ERROR,
@@ -195,7 +195,7 @@ public void testTimeChangeChecker() throws Exception {
             } catch (AssertionError e) {
                 // yeah really shouldn't happen but at least we should log the right warning
             }
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }

@@ -270,9 +270,9 @@ public void testSchedulerWarnLogging() throws Exception {
             "test",
             Settings.builder().put(ThreadPool.SLOW_SCHEDULER_TASK_WARN_THRESHOLD_SETTING.getKey(), "10ms").build()
         );
-        try (var appender = MockLogAppender.capture(ThreadPool.class)) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(ThreadPool.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected warning for slow task",
                     ThreadPool.class.getName(),
                     Level.WARN,
@@ -296,7 +296,7 @@ public String toString() {
                 }
             };
             threadPool.schedule(runnable, TimeValue.timeValueMillis(randomLongBetween(0, 300)), EsExecutors.DIRECT_EXECUTOR_SERVICE);
-            assertBusy(appender::assertAllExpectationsMatched);
+            assertBusy(mockLog::assertAllExpectationsMatched);
         } finally {
             assertTrue(terminate(threadPool));
         }
diff --git a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java
index be0ed5bc36ad7..0f4a60665b35a 100644
--- a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java
@@ -27,7 +27,7 @@
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.telemetry.metric.MeterRegistry;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.junit.After;
@@ -173,25 +173,25 @@ public void testDisconnectLogging() {
         final Releasable localConnectionRef = toClose.getAndSet(null);
         assertThat(localConnectionRef, notNullValue());

-        try (var appender = MockLogAppender.capture(ClusterConnectionManager.class)) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(ClusterConnectionManager.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "locally-triggered close message",
                     ClusterConnectionManager.class.getCanonicalName(),
                     Level.DEBUG,
                     "closing unused transport connection to [" + localClose + "]"
                 )
             );
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "remotely-triggered close message",
                     ClusterConnectionManager.class.getCanonicalName(),
                     Level.INFO,
                     "transport connection to [" + remoteClose.descriptionWithoutAttributes() + "] closed by remote"
                 )
             );
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "shutdown-triggered close message",
                     ClusterConnectionManager.class.getCanonicalName(),
                     Level.TRACE,
@@ -203,7 +203,7 @@ public void testDisconnectLogging() {
             connectionManager.disconnectFromNode(remoteClose);
             connectionManager.close();

-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java
index a2de986d518b8..23f9a7367298f 100644
--- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java
@@ -31,7 +31,7 @@
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.telemetry.tracing.Tracer;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.TransportVersionUtils;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -229,14 +229,9 @@ public void testClosesChannelOnErrorInHandshake() throws Exception {
         // response so we must just close the connection on an error. To avoid the failure disappearing into a black hole we at least log
         // it.
-        try (var mockAppender = MockLogAppender.capture(InboundHandler.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
-                    "expected message",
-                    EXPECTED_LOGGER_NAME,
-                    Level.WARN,
-                    "error processing handshake version"
-                )
+        try (var mockLog = MockLog.capture(InboundHandler.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation("expected message", EXPECTED_LOGGER_NAME, Level.WARN, "error processing handshake version")
             );

             final AtomicBoolean isClosed = new AtomicBoolean();
@@ -260,7 +255,7 @@ public void testClosesChannelOnErrorInHandshake() throws Exception {
             handler.inboundMessage(channel, requestMessage);
             assertTrue(isClosed.get());
             assertNull(channel.getMessageCaptor().get());
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }

@@ -273,11 +268,11 @@ public void testClosesChannelOnErrorInHandshake() throws Exception {
     public void testLogsSlowInboundProcessing() throws Exception {
         handler.setSlowLogThreshold(TimeValue.timeValueMillis(5L));

-        try (var mockAppender = MockLogAppender.capture(InboundHandler.class)) {
+        try (var mockLog = MockLog.capture(InboundHandler.class)) {
             final TransportVersion remoteVersion = TransportVersion.current();

-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation("expected slow request", EXPECTED_LOGGER_NAME, Level.WARN, "handling request ")
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation("expected slow request", EXPECTED_LOGGER_NAME, Level.WARN, "handling request ")
             );

             final long requestId = randomNonNegativeLong();
@@ -301,10 +296,10 @@ public void testLogsSlowInboundProcessing() throws Exception {
             requestHeader.headers = Tuple.tuple(Map.of(), Map.of());
             handler.inboundMessage(channel, requestMessage);
             // expect no response - channel just closed on exception
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation("expected slow response", EXPECTED_LOGGER_NAME, Level.WARN, "handling response ")
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation("expected slow response", EXPECTED_LOGGER_NAME, Level.WARN, "handling response ")
             );

             final long responseId = randomNonNegativeLong();
@@ -324,7 +319,7 @@ public void onResponseReceived(long requestId, Transport.ResponseContext context
             });
             handler.inboundMessage(channel, new InboundMessage(responseHeader, ReleasableBytesReference.empty(), () -> {}));

-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java
index ee4b2a2d55585..dfd259f4df76c 100644
--- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java
@@ -34,7 +34,7 @@
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.TransportVersionUtils;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -536,9 +536,9 @@ public void onResponseSent(long requestId, String action, Exception error) {
     public void testSlowLogOutboundMessage() throws Exception {
         handler.setSlowLogThreshold(TimeValue.timeValueMillis(5L));

-        try (var mockAppender = MockLogAppender.capture(OutboundHandler.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation("expected message", EXPECTED_LOGGER_NAME, Level.WARN, "sending transport message ")
+        try (var mockLog = MockLog.capture(OutboundHandler.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation("expected message", EXPECTED_LOGGER_NAME, Level.WARN, "sending transport message ")
             );

             final int length = randomIntBetween(1, 100);
@@ -555,7 +555,7 @@ public void sendMessage(BytesReference reference, ActionListener listener)
                 }
             }, new BytesArray(randomByteArrayOfLength(length)), f);
             f.get();
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
index 9f70ab879cb25..ae03f9e5f1f8a 100644
--- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
@@ -27,7 +27,7 @@
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -46,7 +46,7 @@
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiFunction;

-import static org.elasticsearch.test.MockLogAppender.assertThatLogger;
+import static org.elasticsearch.test.MockLog.assertThatLogger;
 import static org.elasticsearch.test.NodeRoles.masterOnlyNode;
 import static org.elasticsearch.test.NodeRoles.nonMasterNode;
 import static org.elasticsearch.test.NodeRoles.onlyRoles;
@@ -1638,7 +1638,7 @@ public void testLogsConnectionResult() throws IOException {
                 Settings.builder().putList("cluster.remote.remote_1.seeds", remote.getLocalDiscoNode().getAddress().toString()).build()
             ),
             RemoteClusterService.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "Should log when connecting to remote",
                 RemoteClusterService.class.getCanonicalName(),
                 Level.INFO,
@@ -1649,7 +1649,7 @@ public void testLogsConnectionResult() throws IOException {
         assertThatLogger(
             () -> clusterSettings.applySettings(Settings.EMPTY),
             RemoteClusterService.class,
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "Should log when disconnecting from remote",
                 RemoteClusterService.class.getCanonicalName(),
                 Level.INFO,
@@ -1660,7 +1660,7 @@ public void testLogsConnectionResult() throws IOException {
         assertThatLogger(
             () -> clusterSettings.applySettings(Settings.builder().put(randomIdentifier(), randomIdentifier()).build()),
             RemoteClusterService.class,
-            new MockLogAppender.UnseenEventExpectation(
+            new MockLog.UnseenEventExpectation(
                 "Should not log when changing unrelated setting",
                 RemoteClusterService.class.getCanonicalName(),
                 Level.INFO,
b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -420,15 +420,15 @@ public void testInfoExceptionHandling() throws IllegalAccessException { false, new ElasticsearchException("simulated"), true, - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.ERROR, "*"), - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.WARN, "*"), - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.INFO, "*"), - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, "*") + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.ERROR, "*"), + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.WARN, "*"), + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.INFO, "*"), + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, "*") ); testExceptionHandling( new ElasticsearchException("simulated"), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "message", "org.elasticsearch.transport.TcpTransport", Level.WARN, @@ -444,7 +444,7 @@ public void testInfoExceptionHandling() throws IllegalAccessException { "An existing connection was forcibly closed by remote host" }) { testExceptionHandling( new ElasticsearchException(message), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( message, "org.elasticsearch.transport.TcpTransport", Level.INFO, @@ -460,14 +460,14 @@ public void testDebugExceptionHandling() throws IllegalAccessException { false, new ElasticsearchException("simulated"), true, - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.ERROR, "*"), - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.WARN, "*"), - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.INFO, "*"), - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, "*") + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.ERROR, "*"), + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.WARN, "*"), + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.INFO, "*"), + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, "*") ); testExceptionHandling( new ElasticsearchException("simulated"), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "message", "org.elasticsearch.transport.TcpTransport", Level.WARN, @@ -476,7 +476,7 @@ public void testDebugExceptionHandling() throws IllegalAccessException { ); testExceptionHandling( new 
ClosedChannelException(), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, @@ -493,7 +493,7 @@ public void testDebugExceptionHandling() throws IllegalAccessException { "An existing connection was forcibly closed by remote host" }) { testExceptionHandling( new ElasticsearchException(message), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( message, "org.elasticsearch.transport.TcpTransport", Level.INFO, @@ -506,7 +506,7 @@ public void testDebugExceptionHandling() throws IllegalAccessException { for (final String message : new String[] { "Socket is closed", "Socket closed", "SSLEngine closed already" }) { testExceptionHandling( new ElasticsearchException(message), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( message, "org.elasticsearch.transport.TcpTransport", Level.DEBUG, @@ -517,7 +517,7 @@ public void testDebugExceptionHandling() throws IllegalAccessException { testExceptionHandling( new BindException(), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, @@ -526,7 +526,7 @@ public void testDebugExceptionHandling() throws IllegalAccessException { ); testExceptionHandling( new CancelledKeyException(), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, @@ -537,14 +537,14 @@ public void testDebugExceptionHandling() throws IllegalAccessException { true, new TcpTransport.HttpRequestOnTransportException("test"), false, - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.ERROR, "*"), - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.WARN, "*"), - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.INFO, "*"), - new MockLogAppender.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, "*") + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.ERROR, "*"), + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.WARN, "*"), + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.INFO, "*"), + new MockLog.UnseenEventExpectation("message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, "*") ); testExceptionHandling( new StreamCorruptedException("simulated"), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "message", "org.elasticsearch.transport.TcpTransport", Level.WARN, @@ -553,7 +553,7 @@ public void testDebugExceptionHandling() throws IllegalAccessException { ); testExceptionHandling( new TransportNotReadyException(), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "message", "org.elasticsearch.transport.TcpTransport", Level.DEBUG, @@ -562,8 +562,7 @@ public void testDebugExceptionHandling() throws IllegalAccessException { ); } - private void testExceptionHandling(Exception exception, MockLogAppender.LoggingExpectation... expectations) - throws IllegalAccessException { + private void testExceptionHandling(Exception exception, MockLog.LoggingExpectation... 
expectations) throws IllegalAccessException { testExceptionHandling(true, exception, true, expectations); } @@ -571,12 +570,12 @@ private void testExceptionHandling( boolean startTransport, Exception exception, boolean expectClosed, - MockLogAppender.LoggingExpectation... expectations + MockLog.LoggingExpectation... expectations ) { final TestThreadPool testThreadPool = new TestThreadPool("test"); - try (var appender = MockLogAppender.capture(TcpTransport.class)) { - for (MockLogAppender.LoggingExpectation expectation : expectations) { - appender.addExpectation(expectation); + try (var mockLog = MockLog.capture(TcpTransport.class)) { + for (MockLog.LoggingExpectation expectation : expectations) { + mockLog.addExpectation(expectation); } final Lifecycle lifecycle = new Lifecycle(); @@ -611,7 +610,7 @@ private void testExceptionHandling( assertFalse(listener.isDone()); } - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { ThreadPool.terminate(testThreadPool, 30, TimeUnit.SECONDS); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java index 9227fb7b85062..679432f8b60a0 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; @@ -35,7 +35,7 @@ public void testLoggingHandler() throws IOException { + ", header size: \\d+B" + ", action: cluster:monitor/stats]" + " WRITE: \\d+B"; - final MockLogAppender.LoggingExpectation writeExpectation = new MockLogAppender.PatternSeenEventExpectation( + final MockLog.LoggingExpectation writeExpectation = new MockLog.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, @@ -50,20 +50,20 @@ public void testLoggingHandler() throws IOException { + ", action: cluster:monitor/stats]" + " READ: \\d+B"; - final MockLogAppender.LoggingExpectation readExpectation = new MockLogAppender.PatternSeenEventExpectation( + final MockLog.LoggingExpectation readExpectation = new MockLog.PatternSeenEventExpectation( "cluster monitor request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern ); - try (var appender = MockLogAppender.capture(TransportLogger.class)) { - appender.addExpectation(writeExpectation); - appender.addExpectation(readExpectation); + try (var mockLog = MockLog.capture(TransportLogger.class)) { + mockLog.addExpectation(writeExpectation); + mockLog.addExpectation(readExpectation); BytesReference bytesReference = buildRequest(); TransportLogger.logInboundMessage(mock(TcpChannel.class), bytesReference.slice(6, bytesReference.length() - 6)); TransportLogger.logOutboundMessage(mock(TcpChannel.class), bytesReference); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 52614dee8d04a..b1eddf927d3f3 100644 --- 
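The TcpTransportTests and TransportLoggerTests hunks above all converge on one shape: MockLog.capture(...) in try-with-resources, expectations registered up front, assertAllExpectationsMatched() before close. A condensed sketch; the logger name, regex, and TRACE message are illustrative, and TRACE must actually be enabled (the real tests use @TestLogging) for events to reach the capture:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.elasticsearch.test.MockLog;

    class CaptureSketch {
        static void demo() {
            try (MockLog mockLog = MockLog.capture(CaptureSketch.class)) {
                // PatternSeenEventExpectation matches the message against a regex,
                // unlike SeenEventExpectation's wildcard string.
                mockLog.addExpectation(
                    new MockLog.PatternSeenEventExpectation(
                        "write size is traced",
                        CaptureSketch.class.getCanonicalName(),
                        Level.TRACE,
                        "WRITE: \\d+B"
                    )
                );
                LogManager.getLogger(CaptureSketch.class).trace("WRITE: 42B");
                mockLog.assertAllExpectationsMatched();
            } // close() unregisters the capture and runs its lifecycle checks
        }
    }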
a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -31,6 +31,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import java.io.IOException; +import java.io.InputStream; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -413,7 +414,7 @@ public BytesRefIterator iterator() { } @Override - public void fillWith(StreamInput streamInput) throws IOException { + public void fillWith(InputStream streamInput) throws IOException { in.fillWith(streamInput); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index fbf0d1f0d9ac1..134352a4f8af4 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -26,6 +26,7 @@ import org.apache.lucene.index.LogDocMergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.OrdinalMap; +import org.apache.lucene.index.PointValues; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.TermsEnum; @@ -1397,6 +1398,18 @@ long getCardinality(IndexReader reader, String field) { subs[i] = sortedDocValues.termsEnum(); weights[i] = sortedDocValues.getValueCount(); } + case NUMERIC, SORTED_NUMERIC -> { + final byte[] min = PointValues.getMinPackedValue(reader, field); + final byte[] max = PointValues.getMaxPackedValue(reader, field); + if (min != null && max != null) { + if (min.length == 4) { + return NumericUtils.sortableBytesToInt(max, 0) - NumericUtils.sortableBytesToInt(min, 0); + } else if (min.length == 8) { + return NumericUtils.sortableBytesToLong(max, 0) - NumericUtils.sortableBytesToLong(min, 0); + } + } + return -1; + } default -> { return -1; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 8f1a0072c9a51..6ef8d3d8a6a1b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -450,6 +450,7 @@ private static class ServiceHolder implements Closeable { List entries = new ArrayList<>(); entries.addAll(IndicesModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); + pluginsService.forEach(plugin -> entries.addAll(plugin.getNamedWriteables())); namedWriteableRegistry = new NamedWriteableRegistry(entries); parserConfiguration = XContentParserConfiguration.EMPTY.withRegistry( new NamedXContentRegistry( @@ -565,6 +566,7 @@ public void onRemoval(ShardId shardId, Accountable accountable) { ) .numberOfShards(1) .numberOfReplicas(0) + .putInferenceFields(mapperService.mappingLookup().inferenceFields()) .build(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index d3833fdb3a778..51235a459e28c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -466,7 +466,7 @@ public void testToQuery() 
throws IOException { /* we use a private rewrite context here since we want the most realistic way of asserting that we are cacheable or not. * We do it this way in SearchService where * we first rewrite the query with a private context, then reset the context and then build the actual lucene query*/ - QueryBuilder rewritten = rewriteQuery(firstQuery, new SearchExecutionContext(context)); + QueryBuilder rewritten = rewriteQuery(firstQuery, createQueryRewriteContext(), new SearchExecutionContext(context)); Query firstLuceneQuery = rewritten.toQuery(context); assertNotNull("toQuery should not return null", firstLuceneQuery); assertLuceneQuery(firstQuery, firstLuceneQuery, context); @@ -500,7 +500,9 @@ public void testToQuery() throws IOException { ); } context = new SearchExecutionContext(context); - Query secondLuceneQuery = rewriteQuery(secondQuery, context).toQuery(context); + Query secondLuceneQuery = rewriteQuery(secondQuery, createQueryRewriteContext(), new SearchExecutionContext(context)).toQuery( + context + ); assertNotNull("toQuery should not return null", secondLuceneQuery); assertLuceneQuery(secondQuery, secondLuceneQuery, context); @@ -519,7 +521,8 @@ public void testToQuery() throws IOException { if (supportsBoost() && firstLuceneQuery instanceof MatchNoDocsQuery == false) { secondQuery.boost(firstQuery.boost() + 1f + randomFloat()); - Query thirdLuceneQuery = rewriteQuery(secondQuery, context).toQuery(context); + Query thirdLuceneQuery = rewriteQuery(secondQuery, createQueryRewriteContext(), new SearchExecutionContext(context)) + .toQuery(context); assertNotEquals( "modifying the boost doesn't affect the corresponding lucene query", rewrite(firstLuceneQuery), @@ -529,8 +532,47 @@ public void testToQuery() throws IOException { } } - protected QueryBuilder rewriteQuery(QB queryBuilder, QueryRewriteContext rewriteContext) throws IOException { - QueryBuilder rewritten = rewriteAndFetch(queryBuilder, rewriteContext); + /** + * Simulate rewriting the query builder exclusively on the data node. + *
* <p> + * NOTE: This simulation does not reflect how the query builder will be rewritten in production. + * See {@link AbstractQueryTestCase#rewriteQuery(AbstractQueryBuilder, QueryRewriteContext, SearchExecutionContext)} for a more accurate + * simulation. + * + * @param queryBuilder The query builder to rewrite + * @param shardRewriteContext The data node rewrite context + * @return The rewritten query builder + * @throws IOException + */ + protected QueryBuilder rewriteQuery(QB queryBuilder, SearchExecutionContext shardRewriteContext) throws IOException { + QueryBuilder rewritten = rewriteAndFetch(queryBuilder, shardRewriteContext); + // extra safety to fail fast - serialize the rewritten version to ensure it's serializable. + assertSerialization(rewritten); + return rewritten; + } + + /** + * Simulate rewriting the query builder in stages across the coordinator node and data node. + * It is rewritten on the coordinator node first, then again on the data node. + * + * @param queryBuilder The query builder to rewrite + * @param coordinatorRewriteContext the coordinator node rewrite context + * @param shardRewriteContext The data node rewrite context + * @return The rewritten query builder + * @throws IOException + */ + protected QueryBuilder rewriteQuery( + QB queryBuilder, + QueryRewriteContext coordinatorRewriteContext, + SearchExecutionContext shardRewriteContext + ) throws IOException { + // The first rewriteAndFetch call simulates rewriting on the coordinator node + // The second rewriteAndFetch call simulates rewriting on the shard + QueryBuilder rewritten = rewriteAndFetch(queryBuilder, coordinatorRewriteContext); + // extra safety to fail fast - serialize the rewritten version to ensure it's serializable. + assertSerialization(rewritten); + rewritten = rewriteAndFetch(rewritten, shardRewriteContext); + // extra safety to fail fast - serialize the rewritten version to ensure it's serializable.
assertSerialization(rewritten); return rewritten; @@ -894,7 +936,7 @@ public boolean isTextField(String fieldName) { public void testCacheability() throws IOException { QB queryBuilder = createTestQueryBuilder(); SearchExecutionContext context = createSearchExecutionContext(); - QueryBuilder rewriteQuery = rewriteQuery(queryBuilder, new SearchExecutionContext(context)); + QueryBuilder rewriteQuery = rewriteQuery(queryBuilder, createQueryRewriteContext(), new SearchExecutionContext(context)); assertNotNull(rewriteQuery.toQuery(context)); assertTrue("query should be cacheable: " + queryBuilder.toString(), context.isCacheable()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 80f9f2abea184..b9f425f51c068 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -260,7 +260,7 @@ public static void resetPortCounter() { // TODO: consolidate logging initialization for tests so it all occurs in logconfigurator LogConfigurator.loadLog4jPlugins(); LogConfigurator.configureESLogging(); - MockLogAppender.init(); + MockLog.init(); final List testAppenders = new ArrayList<>(3); for (String leakLoggerName : Arrays.asList("io.netty.util.ResourceLeakDetector", LeakTracker.class.getName())) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLog.java similarity index 81% rename from test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java rename to test/framework/src/main/java/org/elasticsearch/test/MockLog.java index ac54336b09641..d4ff904471915 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockLog.java @@ -31,12 +31,11 @@ /** * Test appender that can be used to verify that certain events were logged correctly */ -public class MockLogAppender implements Releasable { +public class MockLog implements Releasable { - private static final Map> mockAppenders = new ConcurrentHashMap<>(); - private static final RealMockAppender parent = new RealMockAppender(); - // TODO: this can become final once the ctor is made private - private List loggers = List.of(); + private static final Map> mockLogs = new ConcurrentHashMap<>(); + private static final MockAppender appender = new MockAppender(); + private final List loggers; private final List expectations; private volatile boolean isAlive = true; @@ -44,7 +43,7 @@ public class MockLogAppender implements Releasable { public void close() { isAlive = false; for (String logger : loggers) { - mockAppenders.compute(logger, (k, v) -> { + mockLogs.compute(logger, (k, v) -> { assert v != null; v.remove(this); return v.isEmpty() ? 
null : v; @@ -60,20 +59,20 @@ public void close() { } } - private static class RealMockAppender extends AbstractAppender { + private static class MockAppender extends AbstractAppender { - RealMockAppender() { + MockAppender() { super("mock", null, null, false, Property.EMPTY_ARRAY); } @Override public void append(LogEvent event) { - List appenders = mockAppenders.get(event.getLoggerName()); + List appenders = mockLogs.get(event.getLoggerName()); if (appenders == null) { // check if there is a root appender - appenders = mockAppenders.getOrDefault("", List.of()); + appenders = mockLogs.getOrDefault("", List.of()); } - for (MockLogAppender appender : appenders) { + for (MockLog appender : appenders) { if (appender.isAlive == false) { continue; } @@ -84,17 +83,13 @@ public void append(LogEvent event) { } } - public MockLogAppender() { + private MockLog(List loggers) { /* * We use a copy-on-write array list since log messages could be appended while we are setting up expectations. When that occurs, * we would run into a concurrent modification exception from the iteration over the expectations in #append, concurrent with a * modification from #addExpectation. */ expectations = new CopyOnWriteArrayList<>(); - } - - private MockLogAppender(List loggers) { - this(); this.loggers = loggers; } @@ -102,8 +97,8 @@ private MockLogAppender(List loggers) { * Initialize the mock log appender with the log4j system. */ public static void init() { - parent.start(); - Loggers.addAppender(LogManager.getLogger(""), parent); + appender.start(); + Loggers.addAppender(LogManager.getLogger(""), appender); } public void addExpectation(LoggingExpectation expectation) { @@ -294,47 +289,35 @@ public String toString() { } } - public Releasable capturing(Class... classes) { - this.loggers = Arrays.stream(classes).map(Class::getCanonicalName).toList(); - addToMockAppenders(this, loggers); - return this; - } - - public Releasable capturing(String... names) { - this.loggers = Arrays.asList(names); - addToMockAppenders(this, loggers); - return this; - } - /** - * Adds the list of class loggers to this {@link MockLogAppender}. + * Adds the list of class loggers to this {@link MockLog}. * - * Stops and runs some checks on the {@link MockLogAppender} once the returned object is released. + * Stops and runs some checks on the {@link MockLog} once the returned object is released. */ - public static MockLogAppender capture(Class... classes) { + public static MockLog capture(Class... classes) { return create(Arrays.stream(classes).map(Class::getCanonicalName).toList()); } /** * Same as above except takes string class names of each logger. */ - public static MockLogAppender capture(String... names) { + public static MockLog capture(String... 
names) { return create(Arrays.asList(names)); } - private static MockLogAppender create(List loggers) { - MockLogAppender appender = new MockLogAppender(loggers); - addToMockAppenders(appender, loggers); + private static MockLog create(List loggers) { + MockLog appender = new MockLog(loggers); + addToMockLogs(appender, loggers); return appender; } - private static void addToMockAppenders(MockLogAppender appender, List loggers) { + private static void addToMockLogs(MockLog mockLog, List loggers) { for (String logger : loggers) { - mockAppenders.compute(logger, (k, v) -> { + mockLogs.compute(logger, (k, v) -> { if (v == null) { v = new CopyOnWriteArrayList<>(); } - v.add(appender); + v.add(mockLog); return v; }); } @@ -343,13 +326,13 @@ private static void addToMockAppenders(MockLogAppender appender, List lo /** * Executes an action and verifies expectations against the provided logger */ - public static void assertThatLogger(Runnable action, Class loggerOwner, MockLogAppender.LoggingExpectation... expectations) { - try (var mockAppender = MockLogAppender.capture(loggerOwner)) { + public static void assertThatLogger(Runnable action, Class loggerOwner, MockLog.LoggingExpectation... expectations) { + try (var mockLog = MockLog.capture(loggerOwner)) { for (var expectation : expectations) { - mockAppender.addExpectation(expectation); + mockLog.addExpectation(expectation); } action.run(); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 89d10acb6ec45..3dc7201535e0a 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -52,7 +52,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; @@ -1319,15 +1319,15 @@ public void handleException(TransportException exp) {} .build() ); - try (var appender = MockLogAppender.capture("org.elasticsearch.transport.TransportService.tracer")) { + try (var mockLog = MockLog.capture("org.elasticsearch.transport.TransportService.tracer")) { //////////////////////////////////////////////////////////////////////// // tests for included action type "internal:test" // // serviceA logs the request was sent - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "sent request", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1335,8 +1335,8 @@ public void handleException(TransportException exp) {} ) ); // serviceB logs the request was received - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "received request", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1344,8 +1344,8 @@ public void handleException(TransportException exp) {} ) ); // serviceB logs the response was sent - appender.addExpectation( - new 
MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "sent response", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1353,8 +1353,8 @@ public void handleException(TransportException exp) {} ) ); // serviceA logs the response was received - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "received response", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1364,7 +1364,7 @@ public void handleException(TransportException exp) {} serviceA.sendRequest(nodeB, "internal:test", new StringMessageRequest("", 10), noopResponseHandler); - assertBusy(appender::assertAllExpectationsMatched); + assertBusy(mockLog::assertAllExpectationsMatched); //////////////////////////////////////////////////////////////////////// // tests for included action type "internal:testError" which returns an error @@ -1373,8 +1373,8 @@ public void handleException(TransportException exp) {} // appender down. The logging happens after messages are sent so might happen out of order. // serviceA logs the request was sent - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "sent request", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1382,8 +1382,8 @@ public void handleException(TransportException exp) {} ) ); // serviceB logs the request was received - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "received request", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1391,8 +1391,8 @@ public void handleException(TransportException exp) {} ) ); // serviceB logs the error response was sent - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "sent error response", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1400,8 +1400,8 @@ public void handleException(TransportException exp) {} ) ); // serviceA logs the error response was sent - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "received error response", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1411,7 +1411,7 @@ public void handleException(TransportException exp) {} serviceA.sendRequest(nodeB, "internal:testError", new StringMessageRequest(""), noopResponseHandler); - assertBusy(appender::assertAllExpectationsMatched); + assertBusy(mockLog::assertAllExpectationsMatched); //////////////////////////////////////////////////////////////////////// // tests for excluded action type "internal:testNotSeen" @@ -1420,8 +1420,8 @@ public void handleException(TransportException exp) {} // The logging happens after messages are sent so might happen after the response future is completed. 
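As the comment above notes, tracer lines are written after the messages themselves go out, so the expectations in this test are polled rather than asserted once. The idiom, reduced to its core (the pattern and the elided request are illustrative):

    import org.apache.logging.log4j.Level;
    import org.elasticsearch.test.ESTestCase;
    import org.elasticsearch.test.MockLog;

    public class TracerAssertSketch extends ESTestCase {
        public void testTraceEventuallyLogged() throws Exception {
            try (MockLog mockLog = MockLog.capture("org.elasticsearch.transport.TransportService.tracer")) {
                mockLog.addExpectation(
                    new MockLog.PatternSeenEventExpectation(
                        "sent request",
                        "org.elasticsearch.transport.TransportService.tracer",
                        Level.TRACE,
                        ".*\\[internal:test\\].*sent to.*" // illustrative pattern
                    )
                );
                // ... send the transport request here ...
                assertBusy(mockLog::assertAllExpectationsMatched); // retry until async logging catches up
            }
        }
    }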
// serviceA does not log that it sent the message - appender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "not seen request sent", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1429,8 +1429,8 @@ public void handleException(TransportException exp) {} ) ); // serviceB does log that it received the request - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "not seen request received", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1438,8 +1438,8 @@ public void handleException(TransportException exp) {} ) ); // serviceB does log that it sent the response - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "not seen request received", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1447,8 +1447,8 @@ public void handleException(TransportException exp) {} ) ); // serviceA does not log that it received the response - appender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "not seen request sent", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, @@ -1458,7 +1458,7 @@ public void handleException(TransportException exp) {} submitRequest(serviceA, nodeB, "internal:testNotSeen", new StringMessageRequest(""), noopResponseHandler).get(); - assertBusy(appender::assertAllExpectationsMatched); + assertBusy(mockLog::assertAllExpectationsMatched); } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/MockLogAppenderTests.java b/test/framework/src/test/java/org/elasticsearch/test/MockLogTests.java similarity index 79% rename from test/framework/src/test/java/org/elasticsearch/test/MockLogAppenderTests.java rename to test/framework/src/test/java/org/elasticsearch/test/MockLogTests.java index 4973bb83311bc..2019867c9b629 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/MockLogAppenderTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/MockLogTests.java @@ -13,10 +13,10 @@ import java.util.concurrent.atomic.AtomicBoolean; -public class MockLogAppenderTests extends ESTestCase { +public class MockLogTests extends ESTestCase { public void testConcurrentLogAndLifecycle() throws Exception { - Logger logger = LogManager.getLogger(MockLogAppenderTests.class); + Logger logger = LogManager.getLogger(MockLogTests.class); final var keepGoing = new AtomicBoolean(true); final var logThread = new Thread(() -> { while (keepGoing.get()) { @@ -25,9 +25,8 @@ public void testConcurrentLogAndLifecycle() throws Exception { }); logThread.start(); - final var appender = new MockLogAppender(); for (int i = 0; i < 1000; i++) { - try (var ignored = appender.capturing(MockLogAppenderTests.class)) { + try (var mockLog = MockLog.capture(MockLogTests.class)) { Thread.yield(); } } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index d32b5684e19a9..8d9662bfdc074 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -38,7 +38,8 @@ public final class Features { "allowed_warnings", 
"allowed_warnings_regex", "close_to", - "is_after" + "is_after", + "capabilities" ); private Features() { diff --git a/test/yaml-rest-runner/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseFailLogIT.java b/test/yaml-rest-runner/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseFailLogIT.java index 787a3b334d70c..3549d6cfa0b68 100644 --- a/test/yaml-rest-runner/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseFailLogIT.java +++ b/test/yaml-rest-runner/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseFailLogIT.java @@ -11,7 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.logging.log4j.Level; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; @@ -33,10 +33,9 @@ public static Iterable parameters() throws Exception { ) @Override public void test() throws IOException { - final MockLogAppender mockLogAppender = new MockLogAppender(); - try (var ignored = mockLogAppender.capturing(ESClientYamlSuiteTestCaseFailLogIT.class)) { - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(ESClientYamlSuiteTestCaseFailLogIT.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "message with dump of the test yaml", ESClientYamlSuiteTestCaseFailLogIT.class.getCanonicalName(), Level.INFO, @@ -44,8 +43,8 @@ public void test() throws IOException { ) ); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "message with stash dump of response", ESClientYamlSuiteTestCaseFailLogIT.class.getCanonicalName(), Level.INFO, @@ -62,7 +61,7 @@ public void test() throws IOException { } } - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } } diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java index 1e7e30dca0954..5f361d1ef772a 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.monitor.os.OsProbe; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.autoscaling.AutoscalingIntegTestCase; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingCapacity; @@ -47,10 +47,9 @@ public void testCurrentCapacity() throws Exception { } public void assertCurrentCapacity(long memory, long storage, int nodes) { - MockLogAppender appender = new MockLogAppender(); - try (var ignored = appender.capturing(TransportGetAutoscalingCapacityAction.class)) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = 
MockLog.capture(TransportGetAutoscalingCapacityAction.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "autoscaling capacity response message with " + storage, TransportGetAutoscalingCapacityAction.class.getName(), Level.DEBUG, @@ -68,7 +67,7 @@ public void assertCurrentCapacity(long memory, long storage, int nodes) { assertThat(currentCapacity.total().memory().getBytes(), Matchers.equalTo(memory * nodes)); assertThat(currentCapacity.node().storage().getBytes(), Matchers.equalTo(storage)); assertThat(currentCapacity.total().storage().getBytes(), Matchers.equalTo(storage * nodes)); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java index 350fd5075f940..549c5146e6c79 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -19,7 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.xpack.CcrSingleNodeTestCase; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; @@ -133,11 +133,9 @@ public void onFailure(final Exception e) { public void testAutoFollowCoordinatorLogsSkippingAutoFollowCoordinationWithNonCompliantLicense() throws Exception { final Logger logger = LogManager.getLogger(AutoFollowCoordinator.class); - final MockLogAppender appender = new MockLogAppender(); - - try (var ignored = appender.capturing(AutoFollowCoordinator.class)) { - appender.addExpectation( - new MockLogAppender.ExceptionSeenEventExpectation( + try (var mockLog = MockLog.capture(AutoFollowCoordinator.class)) { + mockLog.addExpectation( + new MockLog.ExceptionSeenEventExpectation( getTestName(), logger.getName(), Level.WARN, @@ -197,7 +195,7 @@ public void onFailure(Exception e) { } }); latch.await(); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java index 7861826031be9..5b1382a3ec09a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java @@ -92,7 +92,7 @@ protected void doExecute( BytesReference pagedBytesReference = BytesReference.fromByteArray(array, bytesRequested); try (ReleasableBytesReference reference = new ReleasableBytesReference(pagedBytesReference, array)) { try (CcrRestoreSourceService.SessionReader sessionReader = restoreSourceService.getSessionReader(sessionUUID)) { - long offsetAfterRead = sessionReader.readFileBytes(fileName, reference); + long offsetAfterRead = sessionReader.readFileBytes(fileName, array); long offsetBeforeRead = offsetAfterRead - reference.length(); ActionListener.respondAndRelease(listener, new 
GetCcrRestoreFileChunkResponse(offsetBeforeRead, reference)); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java index 1a822e2dce935..fa9438353779f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java @@ -11,12 +11,11 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefIterator; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.ByteArray; import org.elasticsearch.common.util.CombinedRateLimiter; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.KeyedLock; @@ -243,7 +242,7 @@ private Store.MetadataSnapshot getMetadata() throws IOException { } } - private long readFileBytes(String fileName, BytesReference reference) throws IOException { + private long readFileBytes(String fileName, ByteArray reference) throws IOException { try (Releasable ignored = keyedLock.acquire(fileName)) { final IndexInput indexInput = cachedInputs.computeIfAbsent(fileName, f -> { try { @@ -253,11 +252,7 @@ private long readFileBytes(String fileName, BytesReference reference) throws IOE } }); - BytesRefIterator refIterator = reference.iterator(); - BytesRef ref; - while ((ref = refIterator.next()) != null) { - indexInput.readBytes(ref.bytes, ref.offset, ref.length); - } + reference.fillWith(new InputStreamIndexInput(indexInput, reference.size())); long offsetAfterRead = indexInput.getFilePointer(); @@ -302,9 +297,9 @@ private SessionReader(RestoreSession restoreSession, CcrSettings ccrSettings, Lo * @return the offset of the file after the read is complete * @throws IOException if the read fails */ - public long readFileBytes(String fileName, BytesReference reference) throws IOException { + public long readFileBytes(String fileName, ByteArray reference) throws IOException { CombinedRateLimiter rateLimiter = ccrSettings.getRateLimiter(); - long throttleTime = rateLimiter.maybePause(reference.length()); + long throttleTime = rateLimiter.maybePause(Math.toIntExact(reference.size())); throttleListener.accept(throttleTime); return restoreSession.readFileBytes(fileName, reference); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java index ed5670a4bcc3b..99344f22bae31 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java @@ -10,9 +10,10 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.bytes.BytesArray; import 
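The readFileBytes rewrite above drops the manual BytesRefIterator loop in favor of ByteArray#fillWith, which, per the MockBigArrays hunk earlier, now accepts any InputStream; InputStreamIndexInput bounds the read to the array's size. The idiom in isolation (a sketch; the helper method and names are assumptions):

    import org.apache.lucene.store.IndexInput;
    import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.common.util.ByteArray;

    import java.io.IOException;

    class FillWithSketch {
        // Fills a freshly allocated ByteArray with the next `length` bytes of the input,
        // advancing the IndexInput's file pointer, without an intermediate byte[] copy.
        static ByteArray readChunk(IndexInput indexInput, long length) throws IOException {
            ByteArray bytes = BigArrays.NON_RECYCLING_INSTANCE.newByteArray(length, false);
            bytes.fillWith(new InputStreamIndexInput(indexInput, bytes.size()));
            return bytes; // caller is responsible for closing the ByteArray
        }
    }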
org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineTestCase; @@ -171,7 +172,7 @@ public void testGetSessionReader() throws IOException { String fileName = fileMetadata.name(); byte[] expectedBytes = new byte[(int) fileMetadata.length()]; - byte[] actualBytes = new byte[(int) fileMetadata.length()]; + var actualBytes = BigArrays.NON_RECYCLING_INSTANCE.newByteArray(fileMetadata.length(), false); try ( Engine.IndexCommitRef indexCommitRef = indexShard1.acquireSafeIndexCommit(); IndexInput indexInput = indexCommitRef.getIndexCommit().getDirectory().openInput(fileName, IOContext.READONCE) @@ -180,13 +181,13 @@ public void testGetSessionReader() throws IOException { indexInput.readBytes(expectedBytes, 0, (int) fileMetadata.length()); } - BytesArray byteArray = new BytesArray(actualBytes); try (CcrRestoreSourceService.SessionReader sessionReader = restoreSourceService.getSessionReader(sessionUUID1)) { - long offset = sessionReader.readFileBytes(fileName, byteArray); + long offset = sessionReader.readFileBytes(fileName, actualBytes); assertEquals(offset, fileMetadata.length()); } - assertArrayEquals(expectedBytes, actualBytes); + assertTrue(actualBytes.hasArray()); + assertArrayEquals(expectedBytes, actualBytes.array()); restoreSourceService.closeSession(sessionUUID1); closeShards(indexShard1); } @@ -206,12 +207,12 @@ public void testGetSessionDoesNotLeakFileIfClosed() throws IOException { indexShard.snapshotStoreMetadata().forEach(files::add); try (CcrRestoreSourceService.SessionReader sessionReader = restoreSourceService.getSessionReader(sessionUUID)) { - sessionReader.readFileBytes(files.get(0).name(), new BytesArray(new byte[10])); + sessionReader.readFileBytes(files.get(0).name(), MockBigArrays.NON_RECYCLING_INSTANCE.newByteArray(10, false)); } // Request a second file to ensure that original file is not leaked try (CcrRestoreSourceService.SessionReader sessionReader = restoreSourceService.getSessionReader(sessionUUID)) { - sessionReader.readFileBytes(files.get(1).name(), new BytesArray(new byte[10])); + sessionReader.readFileBytes(files.get(1).name(), MockBigArrays.NON_RECYCLING_INSTANCE.newByteArray(10, false)); } assertTrue(EngineTestCase.hasAcquiredIndexCommits(IndexShardTestCase.getEngine(indexShard))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelTests.java index 0ac7fc487c8a2..2a43500469491 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelTests.java @@ -13,7 +13,7 @@ import org.apache.logging.log4j.message.Message; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; import org.junit.Before; @@ -34,10 +34,10 
@@ public void testCheckFailureAgainstUndefinedFieldLogsMessage() throws Exception ExpressionModel model = new ExpressionModel(); model.defineField("some_int", randomIntBetween(1, 99)); - MockLogAppender.assertThatLogger( + MockLog.assertThatLogger( () -> assertThat(model.test("another_field", List.of(new FieldValue("bork"), new FieldValue("bork!"))), is(false)), ExpressionModel.class, - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation( "undefined field", model.getClass().getName(), Level.DEBUG, @@ -51,7 +51,7 @@ public void testCheckSuccessAgainstUndefinedFieldDoesNotLog() throws Exception { ExpressionModel model = new ExpressionModel(); model.defineField("some_int", randomIntBetween(1, 99)); - MockLogAppender.assertThatLogger( + MockLog.assertThatLogger( () -> assertThat(model.test("another_field", List.of(new FieldValue(null))), is(true)), ExpressionModel.class, new NoMessagesExpectation() @@ -62,14 +62,14 @@ public void testCheckAgainstDefinedFieldDoesNotLog() throws Exception { ExpressionModel model = new ExpressionModel(); model.defineField("some_int", randomIntBetween(1, 99)); - MockLogAppender.assertThatLogger( + MockLog.assertThatLogger( () -> assertThat(model.test("some_int", List.of(new FieldValue(randomIntBetween(100, 200)))), is(false)), ExpressionModel.class, new NoMessagesExpectation() ); } - private class NoMessagesExpectation implements MockLogAppender.LoggingExpectation { + private class NoMessagesExpectation implements MockLog.LoggingExpectation { private final List messages = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java index 6598ac486d53e..5369c95ad6fa7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java @@ -42,7 +42,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; @@ -192,10 +192,9 @@ public void testLogWarningIfBitSetExceedsCacheSize() throws Exception { assertThat(cache.entryCount(), equalTo(0)); assertThat(cache.ramBytesUsed(), equalTo(0L)); - final MockLogAppender mockAppender = new MockLogAppender(); - try (var ignored = mockAppender.capturing(cache.getClass())) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(cache.getClass())) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "[bitset too big]", cache.getClass().getName(), Level.WARN, @@ -216,7 +215,7 @@ public void testLogWarningIfBitSetExceedsCacheSize() throws Exception { assertThat(bitSet.ramBytesUsed(), equalTo(EXPECTED_BYTES_PER_BIT_SET)); }); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } @@ -230,10 +229,9 @@ public void testLogMessageIfCacheFull() throws Exception { assertThat(cache.entryCount(), equalTo(0)); assertThat(cache.ramBytesUsed(), equalTo(0L)); - final MockLogAppender mockAppender = new MockLogAppender(); - try (var ignored = 
mockAppender.capturing(cache.getClass())) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(cache.getClass())) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "[cache full]", cache.getClass().getName(), Level.INFO, @@ -252,7 +250,7 @@ public void testLogMessageIfCacheFull() throws Exception { } }); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/module-info.java b/x-pack/plugin/esql/compute/src/main/java/module-info.java index 429a89f720139..040fafaac4410 100644 --- a/x-pack/plugin/esql/compute/src/main/java/module-info.java +++ b/x-pack/plugin/esql/compute/src/main/java/module-info.java @@ -28,4 +28,5 @@ exports org.elasticsearch.compute.aggregation.spatial; exports org.elasticsearch.compute.operator.topn; exports org.elasticsearch.compute.operator.mvdedupe; + exports org.elasticsearch.compute.aggregation.table; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java index 809c433a000a7..22fee4e595b2e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java @@ -199,8 +199,13 @@ public boolean hasNext() { @Override public IntBlock next() { - int size = Math.toIntExact(Math.min(Integer.MAX_VALUE, targetByteSize / Integer.BYTES / 2)); + int size = Math.toIntExact(Math.min(positionCount - position, targetByteSize / Integer.BYTES / 2)); try (IntBlock.Builder ords = blockFactory.newIntBlockBuilder(size)) { + if (ords.estimatedBytes() > targetByteSize) { + throw new IllegalStateException( + "initial builder overshot target [" + ords.estimatedBytes() + "] vs [" + targetByteSize + "]" + ); + } while (position < positionCount && ords.estimatedBytes() < targetByteSize) { // TODO a test where targetByteSize is very small should still make a few rows. boolean singleEntry = startPosition(groups); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/AscendingSequenceRowInTableLookup.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/AscendingSequenceRowInTableLookup.java new file mode 100644 index 0000000000000..bcb245146c2c6 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/AscendingSequenceRowInTableLookup.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.table; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.ReleasableIterator; + +/** + * {@link RowInTableLookup} that models an increasing sequence of integers. 
+ */ +public final class AscendingSequenceRowInTableLookup extends RowInTableLookup { + private final BlockFactory blockFactory; + private final int min; + private final int max; + + public AscendingSequenceRowInTableLookup(BlockFactory blockFactory, int min, int max) { + this.blockFactory = blockFactory; + this.min = min; + this.max = max; + } + + @Override + public ReleasableIterator lookup(Page page, ByteSizeValue targetBlockSize) { + IntBlock block = page.getBlock(0); + IntVector vector = block.asVector(); + int target = Math.toIntExact(targetBlockSize.getBytes()); + if (vector != null && vector.getPositionCount() * Integer.BYTES < target) { + return ReleasableIterator.single(lookupVector(vector)); + } + return new Lookup(block, target); + } + + private IntBlock lookupVector(IntVector vector) { + if (vector.min() >= min && vector.max() < max) { + if (min == 0) { + vector.incRef(); + return vector.asBlock(); + } + return lookupVectorInRange(vector).asBlock(); + } + return lookupVectorMaybeInRange(vector); + } + + private IntVector lookupVectorInRange(IntVector vector) { + try (IntVector.Builder builder = blockFactory.newIntVectorFixedBuilder(vector.getPositionCount())) { + for (int i = 0; i < vector.getPositionCount(); i++) { + builder.appendInt(vector.getInt(i) - min); + } + return builder.build(); + } + } + + private IntBlock lookupVectorMaybeInRange(IntVector vector) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(vector.getPositionCount())) { + for (int i = 0; i < vector.getPositionCount(); i++) { + int v = vector.getInt(i); + if (v < min || v >= max) { + builder.appendNull(); + } else { + builder.appendInt(v - min); + } + } + return builder.build(); + } + } + + @Override + public String toString() { + return "AscendingSequence[" + min + "-" + max + "]"; + } + + private class Lookup implements ReleasableIterator { + private final IntBlock block; + private final int target; + + int p; + + private Lookup(IntBlock block, int target) { + this.block = block; + this.target = target; + block.incRef(); + } + + @Override + public boolean hasNext() { + return p < block.getPositionCount(); + } + + @Override + public IntBlock next() { + int initialEntries = Math.min(target / Integer.BYTES / 2, block.getPositionCount() - p); + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(initialEntries)) { + if (builder.estimatedBytes() > target) { + throw new IllegalStateException( + "initial builder overshot target [" + builder.estimatedBytes() + "] vs [" + target + "]" + ); + } + while (p < block.getPositionCount() && builder.estimatedBytes() < target) { + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + int first = -1; + boolean started = false; + for (int i = start; i < end; i++) { + int v = block.getInt(i); + if (v < min || v >= max) { + continue; + } + if (first < 0) { + first = v - min; + continue; + } + if (started == false) { + builder.beginPositionEntry(); + builder.appendInt(first); + started = true; + } + builder.appendInt(v - min); + } + p++; + if (started) { + builder.endPositionEntry(); + continue; + } + if (first < 0) { + builder.appendNull(); + continue; + } + builder.appendInt(first); + } + return builder.build(); + } + } + + @Override + public void close() { + block.decRef(); + } + + @Override + public String toString() { + return "AscendingSequence[" + p + "/" + block.getPositionCount() + "]"; + } + } + + @Override + public void close() {} +} diff --git 
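In value terms, AscendingSequenceRowInTableLookup above maps a key to its row offset by subtracting min, emitting null for anything outside [min, max); when the whole input vector is in range and min is 0, the input vector is returned unchanged. A worked example (values invented, not from the PR): a table with keys {5, 6, 7, 8} gives min = 5 and max = 9, so looking up {7, 5, 9} yields {2, 0, null}. The per-value rule, stripped of the Block plumbing:

    class AscendingLookupRule {
        // Mirrors lookupVectorMaybeInRange above; null stands in for Block#appendNull.
        static Integer offsetOf(int value, int min, int max) {
            return (value < min || value >= max) ? null : value - min;
        }
    }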
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/BlockHashRowInTableLookup.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/BlockHashRowInTableLookup.java new file mode 100644 index 0000000000000..1acd1c30ed334 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/BlockHashRowInTableLookup.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.table; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.blockhash.BlockHash; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.ReleasableIterator; + +import java.util.ArrayList; +import java.util.List; + +final class BlockHashRowInTableLookup extends RowInTableLookup { + private final BlockHash hash; + + BlockHashRowInTableLookup(BlockFactory blockFactory, Block[] keys) { + List groups = new ArrayList<>(keys.length); + for (int k = 0; k < keys.length; k++) { + groups.add(new BlockHash.GroupSpec(k, keys[k].elementType())); + } + + hash = BlockHash.buildPackedValuesBlockHash( + groups, + blockFactory, + (int) BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() + ); + boolean success = false; + try { + hash.add(new Page(keys), new GroupingAggregatorFunction.AddInput() { + private int lastOrd = -1; + + @Override + public void add(int positionOffset, IntBlock groupIds) { + for (int p = 0; p < groupIds.getPositionCount(); p++) { + int first = groupIds.getFirstValueIndex(p); + int end = groupIds.getValueCount(p) + first; + for (int i = first; i < end; i++) { + int ord = groupIds.getInt(i); + if (ord != lastOrd + 1) { + // TODO double check these errors over REST once we have LOOKUP + throw new IllegalArgumentException("found a duplicate row"); + } + lastOrd = ord; + } + } + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + for (int p = 0; p < groupIds.getPositionCount(); p++) { + int ord = groupIds.getInt(p); + if (ord != lastOrd + 1) { + throw new IllegalArgumentException("found a duplicate row"); + } + lastOrd = ord; + } + } + }); + success = true; + } finally { + if (success == false) { + close(); + } + } + } + + @Override + public ReleasableIterator lookup(Page page, ByteSizeValue targetBlockSize) { + return hash.lookup(page, targetBlockSize); + } + + @Override + public String toString() { + return hash.toString(); + } + + @Override + public void close() { + hash.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/EmptyRowInTableLookup.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/EmptyRowInTableLookup.java new file mode 100644 index 0000000000000..b2da6d51d3a9b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/EmptyRowInTableLookup.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.table; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.ReleasableIterator; + +/** + * {@link RowInTableLookup} for an empty table. + */ +public final class EmptyRowInTableLookup extends RowInTableLookup { + private final BlockFactory blockFactory; + + public EmptyRowInTableLookup(BlockFactory blockFactory) { + this.blockFactory = blockFactory; + } + + @Override + public ReleasableIterator lookup(Page page, ByteSizeValue targetBlockSize) { + return ReleasableIterator.single((IntBlock) blockFactory.newConstantNullBlock(page.getPositionCount())); + } + + @Override + public void close() {} + + @Override + public String toString() { + return "EmptyLookup"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/RowInTableLookup.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/RowInTableLookup.java new file mode 100644 index 0000000000000..1303fc701c595 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/RowInTableLookup.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.table; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.ReleasableIterator; + +/** + * Consumes {@link Page}s and looks up each row in a pre-built table, and returns the + * offsets of each row in the table. + */ +public abstract sealed class RowInTableLookup implements Releasable permits EmptyRowInTableLookup, AscendingSequenceRowInTableLookup, + BlockHashRowInTableLookup { + /** + * Lookup the values in the {@link Page} and, for each row, return the offset in the + * table that was provided when building the lookup. + *

<p> + * The returned {@link ReleasableIterator} may retain a reference to {@link Block}s + * inside the {@link Page}. Close it to release those references. + * </p>

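+ * <p>
+ * For example (a sketch; {@code lookup}, {@code page}, and the 64kb target are illustrative, not part of this API),
+ * an instance obtained from {@link #build} might be drained like this:
+ * </p>
+ * <pre>{@code
+ * try (ReleasableIterator<IntBlock> it = lookup.lookup(page, ByteSizeValue.ofKb(64))) {
+ *     while (it.hasNext()) {
+ *         try (IntBlock offsets = it.next()) {
+ *             // each position holds the matching row offset(s) in the table, or null when the row is absent
+ *         }
+ *     }
+ * }
+ * }</pre>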
+ */ + public abstract ReleasableIterator lookup(Page page, ByteSizeValue targetBlockSize); + + @Override + public abstract String toString(); + + public static RowInTableLookup build(BlockFactory blockFactory, Block[] keys) { + int positions = keys[0].getPositionCount(); + for (int k = 0; k < keys.length; k++) { + if (positions != keys[k].getPositionCount()) { + // TODO double check these errors over REST once we have LOOKUP + throw new IllegalArgumentException( + "keys must have the same number of positions but [" + positions + "] != [" + keys[k].getPositionCount() + "]" + ); + } + if (keys[k].mayHaveMultivaluedFields()) { + for (int p = 0; p < keys[k].getPositionCount(); p++) { + if (keys[k].getValueCount(p) > 1) { + // TODO double check these errors over REST once we have LOOKUP + throw new IllegalArgumentException("only single valued keys are supported"); + } + } + } + } + if (positions == 0) { + return new EmptyRowInTableLookup(blockFactory); + } + if (keys.length == 1) { + RowInTableLookup lookup = single(blockFactory, keys[0]); + if (lookup != null) { + return lookup; + } + } + return new BlockHashRowInTableLookup(blockFactory, keys); + } + + /** + * Build a {@link RowInTableLookup} for a single {@link Block} or returns {@code null} + * if we don't have a special implementation for this single block. + */ + private static RowInTableLookup single(BlockFactory blockFactory, Block b) { + if (b.elementType() != ElementType.INT) { + return null; + } + IntVector v = (IntVector) b.asVector(); + if (v == null) { + return null; + } + int first = v.getInt(0); + for (int i = 1; i < v.getPositionCount(); i++) { + if (v.getInt(i) - first != i) { + return null; + } + } + return new AscendingSequenceRowInTableLookup(blockFactory, first, first + v.getPositionCount()); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java index c7f12d1099cc1..9c35b5a44d5d3 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; @@ -48,7 +49,7 @@ public Factory( int taskConcurrency, int limit ) { - super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit); + super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, ScoreMode.COMPLETE_NO_SCORES); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java index 10c78be15bd86..184f28e750aec 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; import 
org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; @@ -80,16 +81,22 @@ public abstract static class Factory implements SourceOperator.SourceOperatorFac protected final int limit; protected final LuceneSliceQueue sliceQueue; + /** + * Build the factory. + * + * @param scoreMode the {@link ScoreMode} passed to {@link IndexSearcher#createWeight} + */ protected Factory( List contexts, Function queryFunction, DataPartitioning dataPartitioning, int taskConcurrency, - int limit + int limit, + ScoreMode scoreMode ) { this.limit = limit; this.dataPartitioning = dataPartitioning; - var weightFunction = weightFunction(queryFunction, ScoreMode.COMPLETE_NO_SCORES); + var weightFunction = weightFunction(queryFunction, scoreMode); this.sliceQueue = LuceneSliceQueue.create(contexts, weightFunction, dataPartitioning, taskConcurrency); this.taskConcurrency = Math.min(sliceQueue.totalSlices(), taskConcurrency); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java index 64836b00a7e1b..3721fec3b2eb8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java @@ -7,9 +7,11 @@ package org.elasticsearch.compute.lucene; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.IntBlock; @@ -47,7 +49,7 @@ public Factory( int maxPageSize, int limit ) { - super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit); + super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, ScoreMode.COMPLETE_NO_SCORES); this.maxPageSize = maxPageSize; } @@ -89,6 +91,8 @@ public void collect(int doc) { --remainingDocs; docsBuilder.appendInt(doc); currentPagePos++; + } else { + throw new CollectionTerminatedException(); } } }; @@ -116,14 +120,19 @@ public Page getCheckedOutput() throws IOException { if (scorer == null) { return null; } - scorer.scoreNextRange( - leafCollector, - scorer.leafReaderContext().reader().getLiveDocs(), - // Note: if (maxPageSize - currentPagePos) is a small "remaining" interval, this could lead to slow collection with a - // highly selective filter. Having a large "enough" difference between max- and minPageSize (and thus currentPagePos) - // alleviates this issue. - maxPageSize - currentPagePos - ); + try { + scorer.scoreNextRange( + leafCollector, + scorer.leafReaderContext().reader().getLiveDocs(), + // Note: if (maxPageSize - currentPagePos) is a small "remaining" interval, this could lead to slow collection with a + // highly selective filter. Having a large "enough" difference between max- and minPageSize (and thus currentPagePos) + // alleviates this issue. 
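+ // For example, with illustrative numbers: if maxPageSize - currentPagePos were only 4, each scoreNextRange
+ // call would scan a window of just 4 doc ids, so a highly selective query could make many calls that
+ // collect nothing before a page fills up.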
+ maxPageSize - currentPagePos + ); + } catch (CollectionTerminatedException ex) { + // The leaf collector terminated the execution + scorer.markAsDone(); + } Page page = null; if (currentPagePos >= minPageSize || remainingDocs <= 0 || scorer.isDone()) { pagesEmitted++; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java index e9fb15d265fbe..2e32d20a2365e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TopFieldCollector; import org.elasticsearch.common.Strings; import org.elasticsearch.compute.data.BlockFactory; @@ -38,7 +39,6 @@ */ public final class LuceneTopNSourceOperator extends LuceneOperator { public static final class Factory extends LuceneOperator.Factory { - ; private final int maxPageSize; private final List> sorts; @@ -51,7 +51,7 @@ public Factory( int limit, List> sorts ) { - super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit); + super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, ScoreMode.TOP_DOCS); this.maxPageSize = maxPageSize; this.sorts = sorts; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java index f26a7943730b9..887761fbd5a8b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java @@ -13,6 +13,7 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; @@ -60,7 +61,7 @@ private TimeSeriesSortedSourceOperatorFactory( TimeValue timeSeriesPeriod, int limit ) { - super(contexts, queryFunction, DataPartitioning.SHARD, taskConcurrency, limit); + super(contexts, queryFunction, DataPartitioning.SHARD, taskConcurrency, limit, ScoreMode.COMPLETE_NO_SCORES); this.maxPageSize = maxPageSize; this.timeSeriesPeriod = timeSeriesPeriod; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java deleted file mode 100644 index 4c2f2410addd1..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.operator; - -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; -import org.elasticsearch.compute.aggregation.blockhash.BlockHash; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.core.ReleasableIterator; -import org.elasticsearch.core.Releasables; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -public class HashLookupOperator extends AbstractPageMappingToIteratorOperator { - public record Key(String name, Block block) { - @Override - public String toString() { - return "{name=" - + name - + ", type=" - + block.elementType() - + ", positions=" - + block.getPositionCount() - + ", size=" - + ByteSizeValue.ofBytes(block.ramBytesUsed()) - + "}"; - } - } - - /** - * Factory for {@link HashLookupOperator}. It's received {@link Block}s - * are never closed, so we need to build them from a non-tracking factory. - */ - public record Factory(Key[] keys, int[] blockMapping) implements Operator.OperatorFactory { - @Override - public Operator get(DriverContext driverContext) { - return new HashLookupOperator(driverContext.blockFactory(), keys, blockMapping); - } - - @Override - public String describe() { - return "HashLookup[keys=" + Arrays.toString(keys) + ", mapping=" + Arrays.toString(blockMapping) + "]"; - } - } - - private final List keys; - private final BlockHash hash; - private final int[] blockMapping; - - public HashLookupOperator(BlockFactory blockFactory, Key[] keys, int[] blockMapping) { - this.blockMapping = blockMapping; - this.keys = new ArrayList<>(keys.length); - Block[] blocks = new Block[keys.length]; - List groups = new ArrayList<>(keys.length); - for (int k = 0; k < keys.length; k++) { - this.keys.add(keys[k].name); - blocks[k] = keys[k].block; - groups.add(new BlockHash.GroupSpec(k, keys[k].block.elementType())); - } - /* - * Force PackedValuesBlockHash because it assigned ordinals in order - * of arrival. We'll figure out how to adapt other block hashes to - * do that soon. Soon we must figure out how to map ordinals to rows. - * And, probably at the same time, handle multiple rows containing - * the same keys. 
- */ - this.hash = BlockHash.buildPackedValuesBlockHash( - groups, - blockFactory, - (int) BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() - ); - boolean success = false; - try { - final int[] lastOrd = new int[] { -1 }; - hash.add(new Page(blocks), new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntBlock groupIds) { - // TODO support multiple rows with the same keys - for (int p = 0; p < groupIds.getPositionCount(); p++) { - int first = groupIds.getFirstValueIndex(p); - int end = groupIds.getValueCount(p) + first; - for (int i = first; i < end; i++) { - int ord = groupIds.getInt(i); - if (ord != lastOrd[0] + 1) { - throw new IllegalArgumentException("found a duplicate row"); - } - lastOrd[0] = ord; - } - } - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - for (int p = 0; p < groupIds.getPositionCount(); p++) { - int ord = groupIds.getInt(p); - if (ord != lastOrd[0] + 1) { - throw new IllegalArgumentException("found a duplicate row"); - } - lastOrd[0] = ord; - } - } - }); - success = true; - } finally { - if (success == false) { - close(); - } - } - } - - @Override - protected ReleasableIterator receive(Page page) { - Page mapped = page.projectBlocks(blockMapping); - // TODO maybe this should take an array of Blocks instead? - try { - // hash.lookup increments any references we need to keep for the iterator - return appendBlocks(page, hash.lookup(mapped, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE)); - } finally { - mapped.releaseBlocks(); - } - } - - @Override - public String toString() { - return "HashLookup[keys=" + keys + ", hash=" + hash + ", mapping=" + Arrays.toString(blockMapping) + "]"; - } - - @Override - public void close() { - Releasables.close(super::close, hash); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowInTableLookupOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowInTableLookupOperator.java new file mode 100644 index 0000000000000..908c973fcad65 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowInTableLookupOperator.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.aggregation.table.RowInTableLookup; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class RowInTableLookupOperator extends AbstractPageMappingToIteratorOperator { + public record Key(String name, Block block) { + @Override + public String toString() { + return "{name=" + + name + + ", type=" + + block.elementType() + + ", positions=" + + block.getPositionCount() + + ", size=" + + ByteSizeValue.ofBytes(block.ramBytesUsed()) + + "}"; + } + } + + /** + * Factory for {@link RowInTableLookupOperator}. It's received {@link Block}s + * are never closed, so we need to build them from a non-tracking factory. 
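+ * <p>
+ * For example, key blocks can be built with {@code TestBlockFactory.getNonBreakingInstance()}, as this
+ * operator's tests do.
+ * </p>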
+ */ + public record Factory(Key[] keys, int[] blockMapping) implements Operator.OperatorFactory { + @Override + public Operator get(DriverContext driverContext) { + return new RowInTableLookupOperator(driverContext.blockFactory(), keys, blockMapping); + } + + @Override + public String describe() { + return "RowInTableLookup[keys=" + Arrays.toString(keys) + ", mapping=" + Arrays.toString(blockMapping) + "]"; + } + } + + private final List keys; + private final RowInTableLookup lookup; + private final int[] blockMapping; + + public RowInTableLookupOperator(BlockFactory blockFactory, Key[] keys, int[] blockMapping) { + this.blockMapping = blockMapping; + this.keys = new ArrayList<>(keys.length); + Block[] blocks = new Block[keys.length]; + for (int k = 0; k < keys.length; k++) { + this.keys.add(keys[k].name); + blocks[k] = keys[k].block; + } + this.lookup = RowInTableLookup.build(blockFactory, blocks); + } + + @Override + protected ReleasableIterator receive(Page page) { + Page mapped = page.projectBlocks(blockMapping); + try { + // lookup increments any references we need to keep for the iterator + return appendBlocks(page, lookup.lookup(mapped, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE)); + } finally { + mapped.releaseBlocks(); + } + } + + @Override + public String toString() { + return "RowInTableLookup[" + lookup + ", keys=" + keys + ", mapping=" + Arrays.toString(blockMapping) + "]"; + } + + @Override + public void close() { + Releasables.close(super::close, lookup); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java index 64afb14d22326..8902293ca945f 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java @@ -55,12 +55,12 @@ import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.HashAggregationOperator; -import org.elasticsearch.compute.operator.HashLookupOperator; import org.elasticsearch.compute.operator.LimitOperator; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OperatorTestCase; import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.PageConsumerOperator; +import org.elasticsearch.compute.operator.RowInTableLookupOperator; import org.elasticsearch.compute.operator.SequenceLongBlockSourceOperator; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Releasables; @@ -371,9 +371,9 @@ public void testHashLookup() { driverContext, new SequenceLongBlockSourceOperator(driverContext.blockFactory(), values, 100), List.of( - new HashLookupOperator( + new RowInTableLookupOperator( driverContext.blockFactory(), - new HashLookupOperator.Key[] { new HashLookupOperator.Key("primes", primesBlock) }, + new RowInTableLookupOperator.Key[] { new RowInTableLookupOperator.Key("primes", primesBlock) }, new int[] { 0 } ) ), diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashRandomizedTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashRandomizedTests.java index 0b296fcb5c18d..27ec0b979e8ae 100644 --- 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashRandomizedTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashRandomizedTests.java @@ -337,7 +337,7 @@ private static List> readKeys(Block[] keyBlocks, int position) { return keys.stream().distinct().toList(); } - private static class KeyComparator implements Comparator> { + static class KeyComparator implements Comparator> { @Override public int compare(List lhs, List rhs) { for (int i = 0; i < lhs.size(); i++) { @@ -412,7 +412,7 @@ private static List randomKey(List types) { return types.stream().map(BlockHashRandomizedTests::randomKeyElement).toList(); } - private static Object randomKeyElement(ElementType type) { + public static Object randomKeyElement(ElementType type) { return switch (type) { case INT -> randomInt(); case LONG -> randomLong(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/table/RowInTableLookupRandomizedTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/table/RowInTableLookupRandomizedTests.java new file mode 100644 index 0000000000000..ebd588283ac07 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/table/RowInTableLookupRandomizedTests.java @@ -0,0 +1,350 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.table; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.aggregation.blockhash.BlockHashRandomizedTests; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.MockBlockFactory; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupeTests; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matcher; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; +import java.util.stream.IntStream; + +import static org.elasticsearch.compute.data.BlockTestUtils.append; +import static org.hamcrest.Matchers.any; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.not; +import static 
org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +//@TestLogging(value = "org.elasticsearch.compute:TRACE", reason = "debug") +public class RowInTableLookupRandomizedTests extends ESTestCase { + private static final int TRIES = 100; + private static final int ROW_COUNT = 1000; + + @ParametersFactory + public static List params() { + List params = new ArrayList<>(); + + for (int keysPerPosition : new int[] { 1, 2 }) { + for (int groups : new int[] { 1, 2, 5, 10 }) { + params.add( + new Object[] { + groups, + MultivalueDedupeTests.supportedTypes(), + IntStream.range(0, groups).mapToObj(i -> RANDOM_KEY_ELEMENT).toList(), + keysPerPosition, + 1000, + any(RowInTableLookup.class) } + ); + } + params.add( + new Object[] { + 1, + List.of(ElementType.INT), + List.of(ASCENDING), + keysPerPosition, + 1000, + any(AscendingSequenceRowInTableLookup.class) } + ); + } + return params; + } + + interface Generator { + Object gen(ElementType elementType, int row); + } + + private final int groups; + private final List allowedTypes; + private final List generators; + private final int keysPerPosition; + private final int maxTableSize; + private final Matcher expectedImplementation; + + public RowInTableLookupRandomizedTests( + @Name("groups") int groups, + @Name("allowedTypes") List allowedTypes, + @Name("generator") List generators, + @Name("keysPerPosition") int keysPerPosition, + @Name("maxTableSize") int maxTableSize, + @Name("expectedImplementation") Matcher expectedImplementation + + ) { + this.groups = groups; + this.allowedTypes = allowedTypes; + this.generators = generators; + this.keysPerPosition = keysPerPosition; + this.maxTableSize = maxTableSize; + this.expectedImplementation = expectedImplementation; + } + + public void test() { + CircuitBreaker breaker = new MockBigArrays.LimitedBreaker("esql-test-breaker", ByteSizeValue.ofGb(1)); + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, mockBreakerService(breaker)); + test(new MockBlockFactory(breaker, bigArrays)); + } + + public void testWithCranky() { + CircuitBreakerService service = new CrankyCircuitBreakerService(); + CircuitBreaker breaker = service.getBreaker(CircuitBreaker.REQUEST); + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, service); + try { + test(new MockBlockFactory(breaker, bigArrays)); + logger.info("cranky let us finish!"); + } catch (CircuitBreakingException e) { + logger.info("cranky", e); + assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); + } + } + + private void test(MockBlockFactory blockFactory) { + try (Table table = randomTable(blockFactory); RowInTableLookup offsetInTable = RowInTableLookup.build(blockFactory, table.blocks)) { + assertThat(offsetInTable, expectedImplementation); + for (int t = 0; t < TRIES; t++) { + ByteSizeValue target = ByteSizeValue.ofKb(between(1, 100)); + try ( + ToLookup toLookup = toLookup(blockFactory, table); + ReleasableIterator actual = offsetInTable.lookup(toLookup.rows, target); + ) { + int expectedIdx = 0; + while (actual.hasNext()) { + try (IntBlock lookedup = actual.next()) { + assertThat(lookedup.ramBytesUsed(), lessThan(target.getBytes() * 2)); + if (keysPerPosition == 1) { + assertThat(lookedup.asVector(), not(nullValue())); + } + for (int p = 0; p < lookedup.getPositionCount(); p++) { + assertThat(lookedup.isNull(p), equalTo(false)); + int start = lookedup.getFirstValueIndex(p); + int end = start + 
lookedup.getValueCount(p); + Set<Integer> actualRows = new TreeSet<>(); + for (int i = start; i < end; i++) { + actualRows.add(lookedup.getInt(i)); + } + assertThat(actualRows, equalTo(toLookup.expected.get(expectedIdx))); + expectedIdx++; + } + } + } + assertThat(expectedIdx, equalTo(toLookup.expected.size())); + } + } + } + } +
+ private record Table(List<List<Object>> keys, Map<List<Object>, Integer> keyToRow, Block[] blocks) implements Releasable { + @Override + public void close() { + Releasables.close(blocks); + } + } +
+ private Table randomTable(BlockFactory blockFactory) { + List<List<Object>> keys = new ArrayList<>(maxTableSize); + Map<List<Object>, Integer> keyToRow = new HashMap<>(maxTableSize); + ElementType[] elementTypes = new ElementType[groups]; + Block.Builder[] builders = new Block.Builder[groups]; + try { + for (int g = 0; g < groups; g++) { + elementTypes[g] = randomFrom(allowedTypes); + builders[g] = elementTypes[g].newBlockBuilder(maxTableSize, blockFactory); + } + for (int k = 0; k < maxTableSize; k++) { + List<Object> key = new ArrayList<>(groups); + for (int g = 0; g < groups; g++) { + key.add(generators.get(g).gen(elementTypes[g], k)); + } + if (keyToRow.putIfAbsent(key, keys.size()) == null) { + /* + * Duplicate keys aren't allowed in constructors for RowInTableLookup, + * so just skip them. In most cases we'll have exactly maxTableSize + * entries, but sometimes, say if the generator is `boolean, boolean`, + * we'll end up with fewer. That's fine. + */ + keys.add(key); + for (int g = 0; g < groups; g++) { + append(builders[g], key.get(g)); + } + } + } + return new Table(keys, keyToRow, Block.Builder.buildAll(builders)); + } finally { + Releasables.close(builders); + } + } +
+ private record ToLookup(Page rows, List<Set<Integer>> expected) implements Releasable { + @Override + public void close() { + rows.releaseBlocks(); + } + } +
+ ToLookup toLookup(BlockFactory blockFactory, Table table) { + List<Set<Integer>> expected = new ArrayList<>(ROW_COUNT); + Block.Builder[] builders = new Block.Builder[groups]; + try { + for (int g = 0; g < groups; g++) { + builders[g] = table.blocks[g].elementType().newBlockBuilder(ROW_COUNT, blockFactory); + } + for (int r = 0; r < ROW_COUNT; r++) { + /* + * Pick some number of "generatorKeys" to seed this position. + * We then populate this position with all the values for every column + * of those seed keys. So if the seed values are `(1, a)`, `(2, b)`, and `(3, c)` + * then the values in the positions will be: + * <pre> + * n=[1, 2, 3], s=[a, b, c] + * </pre> + * + * Lookup will combinatorially explode those into something like + * `(1, a)`, `(1, b)`, `(1, c)`, ... `(3, c)`, which contains *at least* + * the seed keys. We calculate the expected value based on the combinatorial + * explosion. + * + * `null` in a key is funky because it means "no value" - so it doesn't + * participate in combinatorial explosions. We just don't add that value to + * the list. So the further combinatorial explosion *won't* contain the + * seed key that contained null. In fact, you can only match seed keys containing + * null if all values are null. That only happens if all the values for + * that column are null. That's certainly possible with `null` typed columns + * or if you get very lucky.
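+ *
+ * A concrete (hypothetical) case: seed keys (1, a) and (2, null) give n=[1, 2], s=[a], which explodes to
+ * (1, a) and (2, a); the null-bearing seed key (2, null) is not in that explosion, so its row is not expected.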
+ */ + List> generatorKeys = IntStream.range(0, keysPerPosition) + .mapToObj(k -> table.keys.get(between(0, table.keys.size() - 1))) + .toList(); + for (int g = 0; g < groups; g++) { + List values = new ArrayList<>(generatorKeys.size()); + for (List key : generatorKeys) { + Object v = key.get(g); + if (v != null) { + values.add(v); + } + } + append(builders[g], values); + } + List> explosion = combinatorialExplosion(generatorKeys); + for (List generatorKey : generatorKeys) { + /* + * All keys should be in the explosion of values. Except keys + * containing `null`. *Except except* if those keys are the + * only column. In that case there really aren't any values + * for this column - so null "shines through". + */ + if (explosion.size() == 1 || generatorKey.stream().noneMatch(Objects::isNull)) { + assertThat(explosion, hasItem(generatorKey)); + } + } + Set expectedAtPosition = new TreeSet<>(); + for (List v : explosion) { + Integer row = table.keyToRow.get(v); + if (row != null) { + expectedAtPosition.add(row); + } + } + expected.add(expectedAtPosition); + } + return new ToLookup(new Page(Block.Builder.buildAll(builders)), expected); + } finally { + Releasables.close(builders); + } + } + + // A breaker service that always returns the given breaker for getBreaker(CircuitBreaker.REQUEST) + static CircuitBreakerService mockBreakerService(CircuitBreaker breaker) { + CircuitBreakerService breakerService = mock(CircuitBreakerService.class); + when(breakerService.getBreaker(CircuitBreaker.REQUEST)).thenReturn(breaker); + return breakerService; + } + + private static final Generator RANDOM_KEY_ELEMENT = new Generator() { + @Override + public Object gen(ElementType elementType, int row) { + return BlockHashRandomizedTests.randomKeyElement(elementType); + } + + @Override + public String toString() { + return "randomKeyElement"; + } + }; + + private static final Generator ASCENDING = new Generator() { + @Override + public Object gen(ElementType elementType, int row) { + return switch (elementType) { + case INT -> row; + case LONG -> (long) row; + case DOUBLE -> (double) row; + default -> throw new IllegalArgumentException("bad element type [" + elementType + "]"); + }; + } + + @Override + public String toString() { + return "ascending"; + } + }; + + private List> combinatorialExplosion(List> values) { + List> uniqueValues = IntStream.range(0, groups).mapToObj(i -> (Set) new HashSet<>()).toList(); + for (List v : values) { + for (int g = 0; g < groups; g++) { + uniqueValues.get(g).add(v.get(g)); + } + } + return combinatorialExplosion(List.of(List.of()), uniqueValues); + } + + private List> combinatorialExplosion(List> soFar, List> remaining) { + if (remaining.isEmpty()) { + return soFar; + } + List> result = new ArrayList<>(); + for (List start : soFar) { + for (Object v : remaining.get(0)) { + List values = new ArrayList<>(start.size() + 1); + values.addAll(start); + values.add(v); + result.add(values); + } + } + return combinatorialExplosion(result, remaining.subList(1, remaining.size())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/table/RowInTableLookupTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/table/RowInTableLookupTests.java new file mode 100644 index 0000000000000..c029f54c171cd --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/table/RowInTableLookupTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.table; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.MockBlockFactory; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RowInTableLookupTests extends ESTestCase { + final CircuitBreaker breaker = new MockBigArrays.LimitedBreaker("esql-test-breaker", ByteSizeValue.ofGb(1)); + final BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, mockBreakerService(breaker)); + final MockBlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); + + public void testDuplicateInts() { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(2)) { + builder.appendInt(1); + builder.appendInt(1); + try (Block b = builder.build()) { + Exception e = expectThrows(IllegalArgumentException.class, () -> RowInTableLookup.build(blockFactory, new Block[] { b })); + assertThat(e.getMessage(), equalTo("found a duplicate row")); + } + } + } + + public void testMultivaluedInts() { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(2)) { + builder.beginPositionEntry(); + builder.appendInt(1); + builder.appendInt(2); + builder.endPositionEntry(); + try (Block b = builder.build()) { + Exception e = expectThrows(IllegalArgumentException.class, () -> RowInTableLookup.build(blockFactory, new Block[] { b })); + assertThat(e.getMessage(), equalTo("only single valued keys are supported")); + } + } + } + + public void testDuplicateBytes() { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(2)) { + builder.appendBytesRef(new BytesRef("foo")); + builder.appendBytesRef(new BytesRef("foo")); + try (Block b = builder.build()) { + Exception e = expectThrows(IllegalArgumentException.class, () -> RowInTableLookup.build(blockFactory, new Block[] { b })); + assertThat(e.getMessage(), equalTo("found a duplicate row")); + } + } + } + + public void testMultivaluedBytes() { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(2)) { + builder.beginPositionEntry(); + builder.appendBytesRef(new BytesRef("foo")); + builder.appendBytesRef(new BytesRef("bar")); + builder.endPositionEntry(); + try (Block b = builder.build()) { + Exception e = expectThrows(IllegalArgumentException.class, () -> RowInTableLookup.build(blockFactory, new Block[] { b })); + assertThat(e.getMessage(), equalTo("only single valued keys are supported")); + } + } + } + + // A breaker service that always returns the given breaker for getBreaker(CircuitBreaker.REQUEST) + static CircuitBreakerService mockBreakerService(CircuitBreaker breaker) { + CircuitBreakerService breakerService = 
mock(CircuitBreakerService.class); + when(breakerService.getBreaker(CircuitBreaker.REQUEST)).thenReturn(breaker); + return breakerService; + } + + @After + public void checkBreaker() { + blockFactory.ensureAllBlocksAreReleased(); + assertThat(breaker.getUsed(), is(0L)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java index c1c2c8845a962..e42e2b47e4e99 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java @@ -47,21 +47,117 @@ public static Object randomValue(ElementType e) { public static void append(Block.Builder builder, Object value) { if (value == null) { builder.appendNull(); - } else if (builder instanceof IntBlock.Builder b && value instanceof Integer v) { - b.appendInt(v); - } else if (builder instanceof LongBlock.Builder b && value instanceof Long v) { - b.appendLong(v); - } else if (builder instanceof DoubleBlock.Builder b && value instanceof Double v) { - b.appendDouble(v); - } else if (builder instanceof BytesRefBlock.Builder b && value instanceof BytesRef v) { - b.appendBytesRef(v); - } else if (builder instanceof BooleanBlock.Builder b && value instanceof Boolean v) { - b.appendBoolean(v); - } else if (builder instanceof DocBlock.Builder b && value instanceof BlockUtils.Doc v) { + return; + } + if (builder instanceof IntBlock.Builder b) { + if (value instanceof Integer v) { + b.appendInt(v); + return; + } + if (value instanceof List l) { + switch (l.size()) { + case 0 -> b.appendNull(); + case 1 -> b.appendInt((Integer) l.get(0)); + default -> { + b.beginPositionEntry(); + for (Object o : l) { + b.appendInt((Integer) o); + } + b.endPositionEntry(); + } + } + return; + } + } + if (builder instanceof LongBlock.Builder b) { + if (value instanceof Long v) { + b.appendLong(v); + return; + } + if (value instanceof List l) { + switch (l.size()) { + case 0 -> b.appendNull(); + case 1 -> b.appendLong((Long) l.get(0)); + default -> { + b.beginPositionEntry(); + for (Object o : l) { + b.appendLong((Long) o); + } + b.endPositionEntry(); + } + } + return; + } + } + if (builder instanceof DoubleBlock.Builder b) { + if (value instanceof Double v) { + b.appendDouble(v); + return; + } + if (value instanceof List l) { + switch (l.size()) { + case 0 -> b.appendNull(); + case 1 -> b.appendDouble((Double) l.get(0)); + default -> { + b.beginPositionEntry(); + for (Object o : l) { + b.appendDouble((Double) o); + } + b.endPositionEntry(); + } + } + return; + } + } + if (builder instanceof BytesRefBlock.Builder b) { + if (value instanceof BytesRef v) { + b.appendBytesRef(v); + return; + } + if (value instanceof List l) { + switch (l.size()) { + case 0 -> b.appendNull(); + case 1 -> b.appendBytesRef((BytesRef) l.get(0)); + default -> { + b.beginPositionEntry(); + for (Object o : l) { + b.appendBytesRef((BytesRef) o); + } + b.endPositionEntry(); + } + } + return; + } + } + if (builder instanceof BooleanBlock.Builder b) { + if (value instanceof Boolean v) { + b.appendBoolean(v); + return; + } + if (value instanceof List l) { + switch (l.size()) { + case 0 -> b.appendNull(); + case 1 -> b.appendBoolean((Boolean) l.get(0)); + default -> { + b.beginPositionEntry(); + for (Object o : l) { + b.appendBoolean((Boolean) o); + } + b.endPositionEntry(); + } + } + return; + } + } + if (builder instanceof 
DocBlock.Builder b && value instanceof BlockUtils.Doc v) { b.appendShard(v.shard()).appendSegment(v.segment()).appendDoc(v.doc()); - } else { - throw new IllegalArgumentException("Can't append [" + value + "/" + value.getClass() + "] to [" + builder + "]"); + return; + } + if (value instanceof List l && l.isEmpty()) { + builder.appendNull(); + return; } + throw new IllegalArgumentException("Can't append [" + value + "/" + value.getClass() + "] to [" + builder + "]"); } public static void readInto(List> values, Page page) { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowInTableLookupOperatorTests.java similarity index 87% rename from x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java rename to x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowInTableLookupOperatorTests.java index 66314f1a95e05..747309e3712e7 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowInTableLookupOperatorTests.java @@ -22,7 +22,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.matchesRegex; -public class HashLookupOperatorTests extends OperatorTestCase { +public class RowInTableLookupOperatorTests extends OperatorTestCase { @Override protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { return new SequenceLongBlockSourceOperator(blockFactory, LongStream.range(0, size).map(l -> randomFrom(1, 7, 14, 20))); @@ -77,9 +77,9 @@ private void assertSimpleOutput(List input, List results, int keyCha @Override protected Operator.OperatorFactory simple() { - return new HashLookupOperator.Factory( - new HashLookupOperator.Key[] { - new HashLookupOperator.Key( + return new RowInTableLookupOperator.Factory( + new RowInTableLookupOperator.Key[] { + new RowInTableLookupOperator.Key( "foo", TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 1, 7, 14, 20 }, 4).asBlock() ) }, @@ -89,13 +89,13 @@ protected Operator.OperatorFactory simple() { @Override protected Matcher expectedDescriptionOfSimple() { - return matchesRegex("HashLookup\\[keys=\\[\\{name=foo, type=LONG, positions=4, size=\\d+b}], mapping=\\[0]]"); + return matchesRegex("RowInTableLookup\\[keys=\\[\\{name=foo, type=LONG, positions=4, size=\\d+b}], mapping=\\[0]]"); } @Override protected Matcher expectedToStringOfSimple() { return matchesRegex( - "HashLookup\\[keys=\\[foo], hash=PackedValuesBlockHash\\{groups=\\[0:LONG], entries=4, size=\\d+b}, mapping=\\[0]]" + "RowInTableLookup\\[PackedValuesBlockHash\\{groups=\\[0:LONG], entries=4, size=\\d+b}, keys=\\[foo], mapping=\\[0]]" ); } @@ -109,9 +109,9 @@ public void testSelectBlocks() { ); List clonedInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List results = drive( - new HashLookupOperator.Factory( - new HashLookupOperator.Key[] { - new HashLookupOperator.Key( + new RowInTableLookupOperator.Factory( + new RowInTableLookupOperator.Key[] { + new RowInTableLookupOperator.Key( "foo", TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 1, 7, 14, 20 }, 4).asBlock() ) }, diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java 
b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index f48900c647b26..fb6a59bf4190f 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -97,6 +97,7 @@ protected void shouldSkipTest(String testName) throws IOException { super.shouldSkipTest(testName); checkCapabilities(remoteClusterClient(), remoteFeaturesService(), testName, testCase); assumeFalse("can't test with _index metadata", hasIndexMetadata(testCase.query)); + assumeFalse("can't test with metrics across cluster", hasMetricsCommand(testCase.query)); assumeTrue("Test " + testName + " is skipped on " + Clusters.oldVersion(), isEnabled(testName, Clusters.oldVersion())); } @@ -235,4 +236,8 @@ static boolean hasIndexMetadata(String query) { } return false; } + + static boolean hasMetricsCommand(String query) { + return Arrays.stream(query.split("\\|")).anyMatch(s -> s.trim().toLowerCase(Locale.ROOT).startsWith("metrics")); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java index 6d11f5bf9ebc4..c9e82c76367cc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java @@ -176,7 +176,7 @@ public class EsqlFeatures implements FeatureSpecification { private Set<NodeFeature> snapshotBuildFeatures() { assert Build.current().isSnapshot() : Build.current(); - return Set.of(COUNTER_TYPES); + return Set.of(METRICS_SYNTAX); } @Override @@ -202,8 +202,7 @@ public Set<NodeFeature> getFeatures() { STRING_LITERAL_AUTO_CASTING_EXTENDED, METADATA_FIELDS, TIMESPAN_ABBREVIATIONS, - COUNTER_TYPES, - METRICS_SYNTAX + COUNTER_TYPES ); if (Build.current().isSnapshot()) { return Collections.unmodifiableSet(Sets.union(features, snapshotBuildFeatures())); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 770c7fb10bb75..0774dc50b0d11 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -9,6 +9,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -222,11 +223,13 @@ public final void test() throws Throwable { try { assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, Version.CURRENT)); - assertThat( - "nonexistent capabilities declared as required", - testCase.requiredCapabilities, - everyItem(in(EsqlCapabilities.CAPABILITIES)) - ); + if (Build.current().isSnapshot()) { + assertThat( + "nonexistent capabilities declared as required", + testCase.requiredCapabilities, + everyItem(in(EsqlCapabilities.CAPABILITIES)) + ); + } /* * The csv tests support all but a few features.
The unsupported features @@ -234,6 +237,7 @@ public final void test() throws Throwable { */ assumeFalse("metadata fields aren't supported", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METADATA_FIELDS))); assumeFalse("enrich can't load fields in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.ENRICH_LOAD))); + assumeFalse("can't load metrics in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METRICS_SYNTAX))); doTest(); } catch (Throwable th) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java index 8b37a0a205180..9c575c1b41cdf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java @@ -58,8 +58,9 @@ private static void addCases(List suppliers) { for (DataType type : new DataType[] { DataTypes.KEYWORD, DataTypes.TEXT }) { suppliers.add(new TestCaseSupplier(" with " + type.esType(), List.of(type, type), () -> { BytesRef str = new BytesRef(randomAlphaOfLength(5)); - BytesRef pattern = new BytesRef(randomAlphaOfLength(2)); - Boolean match = str.utf8ToString().startsWith(pattern.utf8ToString()); + String patternString = randomAlphaOfLength(2); + BytesRef pattern = new BytesRef(patternString + "*"); + Boolean match = str.utf8ToString().startsWith(patternString); return new TestCaseSupplier.TestCase( List.of( new TestCaseSupplier.TypedData(str, type, "str"), diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java index 536de2d2b5aec..45cf7b3d70d04 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; @@ -116,10 +116,9 @@ public void testOnFailure() throws IllegalAccessException { SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo); - final MockLogAppender mockAppender = new MockLogAppender(); - try (var ignored = mockAppender.capturing(SetStepInfoUpdateTask.class)) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(SetStepInfoUpdateTask.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "warning", SetStepInfoUpdateTask.class.getCanonicalName(), Level.WARN, @@ -128,7 +127,7 @@ public void testOnFailure() throws IllegalAccessException { ); task.onFailure(new RuntimeException("test exception")); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index 6106600ee5f33..c67c6f29d69c5 100644 --- 
a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -17,6 +17,7 @@ requires org.apache.httpcomponents.httpasyncclient; requires org.apache.httpcomponents.httpcore.nio; requires org.apache.lucene.core; + requires org.apache.lucene.join; requires com.ibm.icu; exports org.elasticsearch.xpack.inference.action; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 6e7e675a49f9b..d82979bfb71e7 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -30,6 +30,7 @@ import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; @@ -55,6 +56,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettings; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; +import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.rest.RestDeleteInferenceModelAction; import org.elasticsearch.xpack.inference.rest.RestGetInferenceModelAction; @@ -81,7 +83,7 @@ import static java.util.Collections.singletonList; -public class InferencePlugin extends Plugin implements ActionPlugin, ExtensiblePlugin, SystemIndexPlugin, MapperPlugin { +public class InferencePlugin extends Plugin implements ActionPlugin, ExtensiblePlugin, SystemIndexPlugin, MapperPlugin, SearchPlugin { /** * When this setting is true the verification check that @@ -290,4 +292,11 @@ public Collection getActionFilters() { } return List.of(); } + + public List> getQueries() { + if (SemanticTextFeature.isEnabled()) { + return List.of(new QuerySpec<>(SemanticQueryBuilder.NAME, SemanticQueryBuilder::new, SemanticQueryBuilder::fromXContent)); + } + return List.of(); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index 1e9c0d4ef9522..8324b121dfc4f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.inference.mapper; import org.apache.lucene.search.Query; +import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; @@ -34,12 +35,20 @@ import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.index.mapper.vectors.SparseVectorFieldMapper; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.NestedQueryBuilder; +import 
org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentLocation; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xpack.core.ml.inference.results.TextEmbeddingResults; +import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; import java.io.IOException; import java.util.ArrayList; @@ -56,6 +65,8 @@ import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.CHUNKS_FIELD; import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.INFERENCE_FIELD; import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.INFERENCE_ID_FIELD; +import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.getChunksFieldName; +import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.getEmbeddingsFieldName; import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.getOriginalTextFieldName; /** @@ -326,6 +337,85 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { throw new IllegalArgumentException("[semantic_text] fields do not support sorting, scripting or aggregating"); } + + public QueryBuilder semanticQuery(InferenceResults inferenceResults, float boost, String queryName) { + String nestedFieldPath = getChunksFieldName(name()); + String inferenceResultsFieldName = getEmbeddingsFieldName(name()); + QueryBuilder childQueryBuilder; + + if (modelSettings == null) { + // No inference results have been indexed yet + childQueryBuilder = new MatchNoneQueryBuilder(); + } else { + childQueryBuilder = switch (modelSettings.taskType()) { + case SPARSE_EMBEDDING -> { + if (inferenceResults instanceof TextExpansionResults == false) { + throw new IllegalArgumentException( + "Field [" + + name() + + "] expected query inference results to be of type [" + + TextExpansionResults.NAME + + "]," + + " got [" + + inferenceResults.getWriteableName() + + "]. Has the inference endpoint configuration changed?" + ); + } + + // TODO: Use WeightedTokensQueryBuilder + TextExpansionResults textExpansionResults = (TextExpansionResults) inferenceResults; + var boolQuery = QueryBuilders.boolQuery(); + for (var weightedToken : textExpansionResults.getWeightedTokens()) { + boolQuery.should( + QueryBuilders.termQuery(inferenceResultsFieldName, weightedToken.token()).boost(weightedToken.weight()) + ); + } + boolQuery.minimumShouldMatch(1); + + yield boolQuery; + } + case TEXT_EMBEDDING -> { + if (inferenceResults instanceof TextEmbeddingResults == false) { + throw new IllegalArgumentException( + "Field [" + + name() + + "] expected query inference results to be of type [" + + TextEmbeddingResults.NAME + + "]," + + " got [" + + inferenceResults.getWriteableName() + + "]. Has the inference endpoint configuration changed?" 
+ ); + } + + TextEmbeddingResults textEmbeddingResults = (TextEmbeddingResults) inferenceResults; + float[] inference = textEmbeddingResults.getInferenceAsFloat(); + if (inference.length != modelSettings.dimensions()) { + throw new IllegalArgumentException( + "Field [" + + name() + + "] expected query inference results with " + + modelSettings.dimensions() + + " dimensions, got " + + inference.length + + " dimensions. Has the inference endpoint configuration changed?" + ); + } + + yield new KnnVectorQueryBuilder(inferenceResultsFieldName, inference, null, null); + } + default -> throw new IllegalStateException( + "Field [" + + name() + + "] configured to use an inference endpoint with an unsupported task type [" + + modelSettings.taskType() + + "]" + ); + }; + } + + return new NestedQueryBuilder(nestedFieldPath, childQueryBuilder, ScoreMode.Max).boost(boost).queryName(queryName); + } } private static ObjectMapper createInferenceField( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java new file mode 100644 index 0000000000000..4d90920f45bac --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java @@ -0,0 +1,309 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.queries; + +import org.apache.lucene.search.Query; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ResolvedIndices; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.ml.action.InferModelAction; +import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; +import org.elasticsearch.xpack.core.ml.inference.results.TextEmbeddingResults; +import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; +import org.elasticsearch.xpack.core.ml.inference.results.WarningInferenceResults; +import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import 
java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +public class SemanticQueryBuilder extends AbstractQueryBuilder { + public static final String NAME = "semantic"; + + private static final ParseField FIELD_FIELD = new ParseField("field"); + private static final ParseField QUERY_FIELD = new ParseField("query"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, + false, + args -> new SemanticQueryBuilder((String) args[0], (String) args[1]) + ); + + static { + PARSER.declareString(constructorArg(), FIELD_FIELD); + PARSER.declareString(constructorArg(), QUERY_FIELD); + declareStandardFields(PARSER); + } + + private final String fieldName; + private final String query; + private final SetOnce inferenceResultsSupplier; + private final InferenceResults inferenceResults; + private final boolean noInferenceResults; + + public SemanticQueryBuilder(String fieldName, String query) { + if (fieldName == null) { + throw new IllegalArgumentException("[" + NAME + "] requires a " + FIELD_FIELD.getPreferredName() + " value"); + } + if (query == null) { + throw new IllegalArgumentException("[" + NAME + "] requires a " + QUERY_FIELD.getPreferredName() + " value"); + } + this.fieldName = fieldName; + this.query = query; + this.inferenceResults = null; + this.inferenceResultsSupplier = null; + this.noInferenceResults = false; + } + + public SemanticQueryBuilder(StreamInput in) throws IOException { + super(in); + this.fieldName = in.readString(); + this.query = in.readString(); + this.inferenceResults = in.readOptionalNamedWriteable(InferenceResults.class); + this.noInferenceResults = in.readBoolean(); + this.inferenceResultsSupplier = null; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + if (inferenceResultsSupplier != null) { + throw new IllegalStateException("Inference results supplier is set. 
Missing a rewriteAndFetch?"); + } + out.writeString(fieldName); + out.writeString(query); + out.writeOptionalNamedWriteable(inferenceResults); + out.writeBoolean(noInferenceResults); + } + + private SemanticQueryBuilder( + SemanticQueryBuilder other, + SetOnce inferenceResultsSupplier, + InferenceResults inferenceResults, + boolean noInferenceResults + ) { + this.fieldName = other.fieldName; + this.query = other.query; + this.boost = other.boost; + this.queryName = other.queryName; + this.inferenceResultsSupplier = inferenceResultsSupplier; + this.inferenceResults = inferenceResults; + this.noInferenceResults = noInferenceResults; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.SEMANTIC_QUERY; + } + + public static SemanticQueryBuilder fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field(FIELD_FIELD.getPreferredName(), fieldName); + builder.field(QUERY_FIELD.getPreferredName(), query); + boostAndQueryNameToXContent(builder); + builder.endObject(); + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { + SearchExecutionContext searchExecutionContext = queryRewriteContext.convertToSearchExecutionContext(); + if (searchExecutionContext != null) { + return doRewriteBuildSemanticQuery(searchExecutionContext); + } + + return doRewriteGetInferenceResults(queryRewriteContext); + } + + private QueryBuilder doRewriteBuildSemanticQuery(SearchExecutionContext searchExecutionContext) { + MappedFieldType fieldType = searchExecutionContext.getFieldType(fieldName); + if (fieldType == null) { + return new MatchNoneQueryBuilder(); + } else if (fieldType instanceof SemanticTextFieldMapper.SemanticTextFieldType semanticTextFieldType) { + if (inferenceResults == null) { + // This should never happen, but throw on it in case it ever does + throw new IllegalStateException( + "No inference results set for [" + semanticTextFieldType.typeName() + "] field [" + fieldName + "]" + ); + } + + return semanticTextFieldType.semanticQuery(inferenceResults, boost(), queryName()); + } else { + throw new IllegalArgumentException( + "Field [" + fieldName + "] of type [" + fieldType.typeName() + "] does not support " + NAME + " queries" + ); + } + } + + private SemanticQueryBuilder doRewriteGetInferenceResults(QueryRewriteContext queryRewriteContext) { + if (inferenceResults != null || noInferenceResults) { + return this; + } + + if (inferenceResultsSupplier != null) { + InferenceResults inferenceResults = validateAndConvertInferenceResults(inferenceResultsSupplier, fieldName); + return inferenceResults != null ? 
new SemanticQueryBuilder(this, null, inferenceResults, noInferenceResults) : this;
+        }
+
+        ResolvedIndices resolvedIndices = queryRewriteContext.getResolvedIndices();
+        if (resolvedIndices == null) {
+            throw new IllegalStateException(
+                "Rewriting on the coordinator node requires a query rewrite context with non-null resolved indices"
+            );
+        } else if (resolvedIndices.getRemoteClusterIndices().isEmpty() == false) {
+            throw new IllegalArgumentException(NAME + " query does not support cross-cluster search");
+        }
+
+        String inferenceId = getInferenceIdForField(resolvedIndices.getConcreteLocalIndicesMetadata().values(), fieldName);
+        SetOnce<InferenceServiceResults> inferenceResultsSupplier = new SetOnce<>();
+        boolean noInferenceResults = false;
+        if (inferenceId != null) {
+            InferenceAction.Request inferenceRequest = new InferenceAction.Request(
+                TaskType.ANY,
+                inferenceId,
+                null,
+                List.of(query),
+                Map.of(),
+                InputType.SEARCH,
+                InferModelAction.Request.DEFAULT_TIMEOUT_FOR_API
+            );
+
+            queryRewriteContext.registerAsyncAction(
+                (client, listener) -> executeAsyncWithOrigin(
+                    client,
+                    ML_ORIGIN,
+                    InferenceAction.INSTANCE,
+                    inferenceRequest,
+                    listener.delegateFailureAndWrap((l, inferenceResponse) -> {
+                        inferenceResultsSupplier.set(inferenceResponse.getResults());
+                        l.onResponse(null);
+                    })
+                )
+            );
+        } else {
+            // The inference ID can be null if either the field name or index name(s) are invalid (or both).
+            // If this happens, we set the "no inference results" flag to true so the rewrite process can continue.
+            // Invalid index names will be handled in the transport layer, when the query is sent to the shard.
+            // Invalid field names will be handled when the query is rewritten on the shard, where we have access to the index mappings.
+            noInferenceResults = true;
+        }
+
+        return new SemanticQueryBuilder(this, noInferenceResults ? null : inferenceResultsSupplier, null, noInferenceResults);
+    }
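    // Illustrative sketch (editor-added commentary, hypothetical caller; not part of this diff): the
    // SetOnce supplier registered above is resolved asynchronously, so callers are expected to drive
    // this builder through Rewriteable.rewriteAndFetch(...) rather than rewrite it synchronously:
    //
    //     SemanticQueryBuilder original = new SemanticQueryBuilder("inference_field", "inference test");
    //     Rewriteable.rewriteAndFetch(original, queryRewriteContext, ActionListener.wrap(
    //         rewritten -> { /* supplier folded into concrete InferenceResults; safe to serialize */ },
    //         e -> { /* inference endpoint failures surface here */ }
    //     ));
    //
    // Each pass re-enters doRewrite(...); doWriteTo(...) above intentionally refuses to serialize a
    // builder whose supplier has not yet been folded into concrete inference results.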
+    private static InferenceResults validateAndConvertInferenceResults(
+        SetOnce<InferenceServiceResults> inferenceResultsSupplier,
+        String fieldName
+    ) {
+        InferenceServiceResults inferenceServiceResults = inferenceResultsSupplier.get();
+        if (inferenceServiceResults == null) {
+            return null;
+        }
+
+        List<InferenceResults> inferenceResultsList = inferenceServiceResults.transformToCoordinationFormat();
+        if (inferenceResultsList.isEmpty()) {
+            throw new IllegalArgumentException("No inference results retrieved for field [" + fieldName + "]");
+        } else if (inferenceResultsList.size() > 1) {
+            // The inference call should truncate if the query is too large.
+            // Thus, if we receive more than one inference result, it is a server-side error.
+            throw new IllegalStateException(inferenceResultsList.size() + " inference results retrieved for field [" + fieldName + "]");
+        }
+
+        InferenceResults inferenceResults = inferenceResultsList.get(0);
+        if (inferenceResults instanceof ErrorInferenceResults errorInferenceResults) {
+            throw new IllegalStateException(
+                "Field [" + fieldName + "] query inference error: " + errorInferenceResults.getException().getMessage(),
+                errorInferenceResults.getException()
+            );
+        } else if (inferenceResults instanceof WarningInferenceResults warningInferenceResults) {
+            throw new IllegalStateException("Field [" + fieldName + "] query inference warning: " + warningInferenceResults.getWarning());
+        } else if (inferenceResults instanceof TextExpansionResults == false && inferenceResults instanceof TextEmbeddingResults == false) {
+            throw new IllegalArgumentException(
+                "Field ["
+                    + fieldName
+                    + "] expected query inference results to be of type ["
+                    + TextExpansionResults.NAME
+                    + "] or ["
+                    + TextEmbeddingResults.NAME
+                    + "], got ["
+                    + inferenceResults.getWriteableName()
+                    + "]. Has the inference endpoint configuration changed?"
+            );
+        }
+
+        return inferenceResults;
+    }
+
+    @Override
+    protected Query doToQuery(SearchExecutionContext context) throws IOException {
+        throw new IllegalStateException(NAME + " should have been rewritten to another query type");
+    }
+
+    private static String getInferenceIdForField(Collection<IndexMetadata> indexMetadataCollection, String fieldName) {
+        String inferenceId = null;
+        for (IndexMetadata indexMetadata : indexMetadataCollection) {
+            InferenceFieldMetadata inferenceFieldMetadata = indexMetadata.getInferenceFields().get(fieldName);
+            String indexInferenceId = inferenceFieldMetadata != null ? inferenceFieldMetadata.getInferenceId() : null;
+            if (indexInferenceId != null) {
+                if (inferenceId != null && inferenceId.equals(indexInferenceId) == false) {
+                    throw new IllegalArgumentException("Field [" + fieldName + "] has multiple inference IDs associated with it");
+                }
+
+                inferenceId = indexInferenceId;
+            }
+        }
+
+        return inferenceId;
+    }
+
+    @Override
+    protected boolean doEquals(SemanticQueryBuilder other) {
+        return Objects.equals(fieldName, other.fieldName)
+            && Objects.equals(query, other.query)
+            && Objects.equals(inferenceResults, other.inferenceResults);
+    }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(fieldName, query, inferenceResults);
+    }
+}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java
index 19776628a8d00..aacd72d8f1703 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java
@@ -21,7 +21,7 @@ import org.apache.lucene.search.join.ScoreMode;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
@@ -51,6 +51,7 @@ import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.xcontent.XContentBuilder;
 import
org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.model.TestModel; import org.junit.AssumptionViolatedException; @@ -174,42 +175,41 @@ public void testUpdatesToInferenceIdNotSupported() throws IOException { } public void testDynamicUpdate() throws IOException { + final String fieldName = "semantic"; + final String inferenceId = "test_service"; + MapperService mapperService = createMapperService(mapping(b -> {})); mapperService.merge( "_doc", new CompressedXContent( - Strings.toString(PutMappingRequest.simpleMapping("semantic", "type=semantic_text,inference_id=test_service")) + Strings.toString(PutMappingRequest.simpleMapping(fieldName, "type=semantic_text,inference_id=" + inferenceId)) ), MapperService.MergeReason.MAPPING_UPDATE ); - String source = """ - { - "semantic": { - "inference": { - "inference_id": "test_service", - "model_settings": { - "task_type": "SPARSE_EMBEDDING" - }, - "chunks": [ - { - "embeddings": { - "feature_0": 1 - }, - "text": "feature_0" - } - ] - } - } - } - """; - SourceToParse sourceToParse = new SourceToParse("test", new BytesArray(source), XContentType.JSON); + + SemanticTextField semanticTextField = new SemanticTextField( + fieldName, + List.of(), + new SemanticTextField.InferenceResult( + inferenceId, + new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null), + List.of() + ), + XContentType.JSON + ); + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + builder.field(semanticTextField.fieldName()); + builder.value(semanticTextField); + builder.endObject(); + + SourceToParse sourceToParse = new SourceToParse("test", BytesReference.bytes(builder), XContentType.JSON); ParsedDocument parsedDocument = mapperService.documentMapper().parse(sourceToParse); mapperService.merge( "_doc", parsedDocument.dynamicMappingsUpdate().toCompressedXContent(), MapperService.MergeReason.MAPPING_UPDATE ); - assertSemanticTextField(mapperService, "semantic", true); + assertSemanticTextField(mapperService, fieldName, true); } public void testUpdateModelSettings() throws IOException { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java index e75d1c92e0e62..1f58c4165056d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java @@ -7,24 +7,15 @@ package org.elasticsearch.xpack.inference.mapper; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.index.mapper.NonDynamicFieldMapperTests; -import org.elasticsearch.inference.Model; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xpack.inference.Utils; import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; -import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.junit.Before; import java.util.Collection; import java.util.List; import java.util.Locale; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; - -import static org.hamcrest.CoreMatchers.is; -import static 
org.hamcrest.Matchers.nullValue; public class SemanticTextNonDynamicFieldMapperTests extends NonDynamicFieldMapperTests { @@ -50,39 +41,4 @@ protected String getMapping() { "inference_id": "%s" """, SemanticTextFieldMapper.CONTENT_TYPE, TestSparseInferenceServiceExtension.TestInferenceService.NAME); } - - private void storeSparseModel() throws Exception { - Model model = new TestSparseInferenceServiceExtension.TestSparseModel( - TestSparseInferenceServiceExtension.TestInferenceService.NAME, - new TestSparseInferenceServiceExtension.TestServiceSettings("sparse_model", null, false) - ); - storeModel(model); - } - - private void storeModel(Model model) throws Exception { - ModelRegistry modelRegistry = new ModelRegistry(client()); - - AtomicReference storeModelHolder = new AtomicReference<>(); - AtomicReference exceptionHolder = new AtomicReference<>(); - - blockingCall(listener -> modelRegistry.storeModel(model, listener), storeModelHolder, exceptionHolder); - - assertThat(storeModelHolder.get(), is(true)); - assertThat(exceptionHolder.get(), is(nullValue())); - } - - private void blockingCall(Consumer> function, AtomicReference response, AtomicReference error) - throws InterruptedException { - CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = ActionListener.wrap(r -> { - response.set(r); - latch.countDown(); - }, e -> { - error.set(e); - latch.countDown(); - }); - - function.accept(listener); - latch.await(); - } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilderTests.java new file mode 100644 index 0000000000000..6e11feecebd73 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilderTests.java @@ -0,0 +1,349 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.queries; + +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.KnnFloatVectorQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.search.ESToParentBlockJoinQuery; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.AbstractQueryTestCase; +import org.elasticsearch.test.index.IndexVersionUtils; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.results.TextEmbeddingResults; +import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; +import org.elasticsearch.xpack.inference.InferencePlugin; +import org.elasticsearch.xpack.inference.mapper.SemanticTextField; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import static org.apache.lucene.search.BooleanClause.Occur.FILTER; +import static org.apache.lucene.search.BooleanClause.Occur.MUST; +import static org.apache.lucene.search.BooleanClause.Occur.SHOULD; +import static org.elasticsearch.index.IndexVersions.NEW_SPARSE_VECTOR; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; + +public class SemanticQueryBuilderTests extends AbstractQueryTestCase { + private static final String SEMANTIC_TEXT_FIELD = "semantic"; + private static final float TOKEN_WEIGHT = 0.5f; + private static final int QUERY_TOKEN_LENGTH = 4; + private static final int TEXT_EMBEDDING_DIMENSION_COUNT = 10; + 
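// Illustrative note (editor-added, hypothetical values; not part of this diff): the request-level
// JSON this suite round-trips has the shape produced by SemanticQueryBuilder#doXContent, e.g.:
//
//     {
//       "semantic": {
//         "field": "semantic",
//         "query": "fox jumped",
//         "boost": 2.0,
//         "_name": "my-semantic-query"
//       }
//     }
//
// "boost" and "_name" are the standard AbstractQueryBuilder fields registered through
// declareStandardFields(PARSER).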
private static final String INFERENCE_ID = "test_service";
+
+    private static InferenceResultType inferenceResultType;
+
+    private enum InferenceResultType {
+        NONE,
+        SPARSE_EMBEDDING,
+        TEXT_EMBEDDING
+    }
+
+    private Integer queryTokenCount;
+
+    @BeforeClass
+    public static void setInferenceResultType() {
+        // The inference result type is a class variable because it is used when initializing additional mappings,
+        // which happens once per test suite run in AbstractBuilderTestCase#beforeTest as part of service holder creation.
+        inferenceResultType = randomFrom(InferenceResultType.values());
+    }
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        queryTokenCount = null;
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return List.of(InferencePlugin.class, FakeMlPlugin.class);
+    }
+
+    @Override
+    protected Settings createTestIndexSettings() {
+        // Randomize the index version within the compatible range. IndexVersion.current() is offered
+        // explicitly because, across the wide range of supported versions, a purely random pick would
+        // select the current version only rarely.
+        IndexVersion indexVersionCreated = randomBoolean()
+            ? IndexVersion.current()
+            : IndexVersionUtils.randomVersionBetween(random(), NEW_SPARSE_VECTOR, IndexVersion.current());
+        return Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexVersionCreated).build();
+    }
+
+    @Override
+    protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
+        mapperService.merge(
+            "_doc",
+            new CompressedXContent(
+                Strings.toString(PutMappingRequest.simpleMapping(SEMANTIC_TEXT_FIELD, "type=semantic_text,inference_id=" + INFERENCE_ID))
+            ),
+            MapperService.MergeReason.MAPPING_UPDATE
+        );
+
+        applyRandomInferenceResults(mapperService);
+    }
+
+    private void applyRandomInferenceResults(MapperService mapperService) throws IOException {
+        // Parse random inference results (or none) to set up the dynamic inference result mappings under the semantic text field
+        SourceToParse sourceToParse = buildSemanticTextFieldWithInferenceResults(inferenceResultType);
+        if (sourceToParse != null) {
+            ParsedDocument parsedDocument = mapperService.documentMapper().parse(sourceToParse);
+            mapperService.merge(
+                "_doc",
+                parsedDocument.dynamicMappingsUpdate().toCompressedXContent(),
+                MapperService.MergeReason.MAPPING_UPDATE
+            );
+        }
+    }
+
+    @Override
+    protected SemanticQueryBuilder doCreateTestQueryBuilder() {
+        queryTokenCount = randomIntBetween(1, 5);
+        List<String> queryTokens = new ArrayList<>(queryTokenCount);
+        for (int i = 0; i < queryTokenCount; i++) {
+            queryTokens.add(randomAlphaOfLength(QUERY_TOKEN_LENGTH));
+        }
+
+        SemanticQueryBuilder builder = new SemanticQueryBuilder(SEMANTIC_TEXT_FIELD, String.join(" ", queryTokens));
+        if (randomBoolean()) {
+            builder.boost((float) randomDoubleBetween(0.1, 10.0, true));
+        }
+        if (randomBoolean()) {
+            builder.queryName(randomAlphaOfLength(4));
+        }
+
+        return builder;
+    }
+
+    @Override
+    protected void doAssertLuceneQuery(SemanticQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException {
+        assertThat(queryTokenCount, notNullValue());
+        assertThat(query, notNullValue());
+        assertThat(query, instanceOf(ESToParentBlockJoinQuery.class));
+
+        ESToParentBlockJoinQuery nestedQuery = (ESToParentBlockJoinQuery) query;
+        assertThat(nestedQuery.getScoreMode(), equalTo(ScoreMode.Max));
+
+        switch (inferenceResultType) {
+            case NONE -> assertThat(nestedQuery.getChildQuery(), instanceOf(MatchNoDocsQuery.class));
+            case SPARSE_EMBEDDING ->
assertSparseEmbeddingLuceneQuery(nestedQuery.getChildQuery()); + case TEXT_EMBEDDING -> assertTextEmbeddingLuceneQuery(nestedQuery.getChildQuery()); + } + } + + private void assertSparseEmbeddingLuceneQuery(Query query) { + Query innerQuery = assertOuterBooleanQuery(query); + assertThat(innerQuery, instanceOf(BooleanQuery.class)); + + BooleanQuery innerBooleanQuery = (BooleanQuery) innerQuery; + assertThat(innerBooleanQuery.clauses().size(), equalTo(queryTokenCount)); + innerBooleanQuery.forEach(c -> { + assertThat(c.getOccur(), equalTo(SHOULD)); + assertThat(c.getQuery(), instanceOf(BoostQuery.class)); + assertThat(((BoostQuery) c.getQuery()).getBoost(), equalTo(TOKEN_WEIGHT)); + }); + } + + private void assertTextEmbeddingLuceneQuery(Query query) { + Query innerQuery = assertOuterBooleanQuery(query); + assertThat(innerQuery, instanceOf(KnnFloatVectorQuery.class)); + } + + private Query assertOuterBooleanQuery(Query query) { + assertThat(query, instanceOf(BooleanQuery.class)); + BooleanQuery outerBooleanQuery = (BooleanQuery) query; + + List outerMustClauses = new ArrayList<>(); + List outerFilterClauses = new ArrayList<>(); + for (BooleanClause clause : outerBooleanQuery.clauses()) { + BooleanClause.Occur occur = clause.getOccur(); + if (occur == MUST) { + outerMustClauses.add(clause); + } else if (occur == FILTER) { + outerFilterClauses.add(clause); + } else { + fail("Unexpected boolean " + occur + " clause"); + } + } + + assertThat(outerMustClauses.size(), equalTo(1)); + assertThat(outerFilterClauses.size(), equalTo(1)); + + return outerMustClauses.get(0).getQuery(); + } + + @Override + protected boolean canSimulateMethod(Method method, Object[] args) throws NoSuchMethodException { + return method.equals(Client.class.getMethod("execute", ActionType.class, ActionRequest.class, ActionListener.class)) + && (args[0] instanceof InferenceAction); + } + + @Override + protected Object simulateMethod(Method method, Object[] args) { + InferenceAction.Request request = (InferenceAction.Request) args[1]; + assertThat(request.getTaskType(), equalTo(TaskType.ANY)); + assertThat(request.getInputType(), equalTo(InputType.SEARCH)); + + List input = request.getInput(); + assertThat(input.size(), equalTo(1)); + String query = input.get(0); + + InferenceAction.Response response = switch (inferenceResultType) { + case NONE -> randomBoolean() ? generateSparseEmbeddingInferenceResponse(query) : generateTextEmbeddingInferenceResponse(); + case SPARSE_EMBEDDING -> generateSparseEmbeddingInferenceResponse(query); + case TEXT_EMBEDDING -> generateTextEmbeddingInferenceResponse(); + }; + + @SuppressWarnings("unchecked") // We matched the method above. 
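// Descriptive note (editor-added; not part of this diff): this simulated Client#execute call is
// what completes the async action registered by SemanticQueryBuilder#doRewriteGetInferenceResults,
// so the rewrite loop in these tests finishes without a real inference service. The unchecked cast
// below recovers the ActionListener<InferenceAction.Response> passed as the third execute(...) argument.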
+ ActionListener listener = (ActionListener) args[2]; + listener.onResponse(response); + + return null; + } + + private InferenceAction.Response generateSparseEmbeddingInferenceResponse(String query) { + List weightedTokens = Arrays.stream(query.split("\\s+")) + .map(s -> new TextExpansionResults.WeightedToken(s, TOKEN_WEIGHT)) + .toList(); + TextExpansionResults textExpansionResults = new TextExpansionResults(DEFAULT_RESULTS_FIELD, weightedTokens, false); + + return new InferenceAction.Response(SparseEmbeddingResults.of(List.of(textExpansionResults))); + } + + private InferenceAction.Response generateTextEmbeddingInferenceResponse() { + double[] inference = new double[TEXT_EMBEDDING_DIMENSION_COUNT]; + Arrays.fill(inference, 1.0); + TextEmbeddingResults textEmbeddingResults = new TextEmbeddingResults(DEFAULT_RESULTS_FIELD, inference, false); + + return new InferenceAction.Response( + org.elasticsearch.xpack.core.inference.results.TextEmbeddingResults.of(List.of(textEmbeddingResults)) + ); + } + + @Override + public void testMustRewrite() throws IOException { + SearchExecutionContext context = createSearchExecutionContext(); + SemanticQueryBuilder builder = new SemanticQueryBuilder("foo", "bar"); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> builder.toQuery(context)); + assertThat(e.getMessage(), equalTo(SemanticQueryBuilder.NAME + " should have been rewritten to another query type")); + } + + public void testIllegalValues() { + { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SemanticQueryBuilder(null, "query")); + assertThat(e.getMessage(), equalTo("[semantic] requires a field value")); + } + { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SemanticQueryBuilder("fieldName", null)); + assertThat(e.getMessage(), equalTo("[semantic] requires a query value")); + } + } + + public void testToXContent() throws IOException { + QueryBuilder queryBuilder = new SemanticQueryBuilder("foo", "bar"); + checkGeneratedJson(""" + { + "semantic": { + "field": "foo", + "query": "bar" + } + }""", queryBuilder); + } + + public void testSerializingQueryWhenNoInferenceId() throws IOException { + // Test serializing the query after rewriting on the coordinator node when no inference ID could be resolved for the field + SemanticQueryBuilder builder = new SemanticQueryBuilder(SEMANTIC_TEXT_FIELD + "_missing", "query text"); + + QueryRewriteContext queryRewriteContext = createQueryRewriteContext(); + queryRewriteContext.setAllowUnmappedFields(true); + + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); + searchExecutionContext.setAllowUnmappedFields(true); + + QueryBuilder rewritten = rewriteQuery(builder, queryRewriteContext, searchExecutionContext); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } + + private static SourceToParse buildSemanticTextFieldWithInferenceResults(InferenceResultType inferenceResultType) throws IOException { + SemanticTextField.ModelSettings modelSettings = switch (inferenceResultType) { + case NONE -> null; + case SPARSE_EMBEDDING -> new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null); + case TEXT_EMBEDDING -> new SemanticTextField.ModelSettings( + TaskType.TEXT_EMBEDDING, + TEXT_EMBEDDING_DIMENSION_COUNT, + SimilarityMeasure.COSINE + ); + }; + + SourceToParse sourceToParse = null; + if (modelSettings != null) { + SemanticTextField semanticTextField = new SemanticTextField( + SEMANTIC_TEXT_FIELD, + List.of(), 
+ new SemanticTextField.InferenceResult(INFERENCE_ID, modelSettings, List.of()), + XContentType.JSON + ); + + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + builder.field(semanticTextField.fieldName()); + builder.value(semanticTextField); + builder.endObject(); + sourceToParse = new SourceToParse("test", BytesReference.bytes(builder), XContentType.JSON); + } + + return sourceToParse; + } + + public static class FakeMlPlugin extends Plugin { + @Override + public List getNamedWriteables() { + return new MlInferenceNamedXContentProvider().getNamedWriteables(); + } + } +} diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml new file mode 100644 index 0000000000000..8fffa7fa8c7ef --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml @@ -0,0 +1,418 @@ +setup: + - requires: + cluster_features: "gte_v8.15.0" + reason: semantic_text introduced in 8.15.0 + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + inference.put: + task_type: text_embedding + inference_id: dense-inference-id + body: > + { + "service": "text_embedding_test_service", + "service_settings": { + "model": "my_model", + "dimensions": 10, + "api_key": "abc64", + "similarity": "COSINE" + }, + "task_settings": { + } + } + + - do: + indices.create: + index: test-sparse-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + non_inference_field: + type: text + + - do: + indices.create: + index: test-dense-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: dense-inference-id + non_inference_field: + type: text + +--- +"Query using a sparse embedding model": + - skip: + features: [ "headers", "close_to" ] + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: ["inference test", "another inference test"] + non_inference_field: "non inference test" + refresh: true + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-sparse-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + - close_to: { hits.hits.0._score: { value: 3.7837332e17, error: 1e10 } } + - length: { hits.hits.0._source.inference_field.inference.chunks: 2 } + +--- +"Query using a dense embedding model": + - skip: + features: [ "headers", "close_to" ] + + - do: + index: + index: test-dense-index + id: doc_1 + body: + inference_field: ["inference test", "another inference test"] + non_inference_field: "non inference test" + refresh: true + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-dense-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + - close_to: { hits.hits.0._score: { value: 1.0, error: 
0.0001 } } + - length: { hits.hits.0._source.inference_field.inference.chunks: 2 } + +--- +"Apply boost and query name": + - skip: + features: [ "headers", "close_to" ] + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: ["inference test", "another inference test"] + non_inference_field: "non inference test" + refresh: true + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-sparse-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + boost: 100.0 + _name: i-like-naming-my-queries + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + - close_to: { hits.hits.0._score: { value: 3.783733e19, error: 1e13 } } + - length: { hits.hits.0._source.inference_field.inference.chunks: 2 } + - match: { hits.hits.0.matched_queries: ["i-like-naming-my-queries"] } + +--- +"Query an index alias": + - skip: + features: [ "headers", "close_to" ] + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: ["inference test", "another inference test"] + non_inference_field: "non inference test" + refresh: true + + - do: + indices.put_alias: + index: test-sparse-index + name: my-alias + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: my-alias + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + - close_to: { hits.hits.0._score: { value: 3.7837332e17, error: 1e10 } } + - length: { hits.hits.0._source.inference_field.inference.chunks: 2 } + +--- +"Query the wrong field type": + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: "inference test" + non_inference_field: "non inference test" + refresh: true + + - do: + catch: bad_request + search: + index: test-sparse-index + body: + query: + semantic: + field: "non_inference_field" + query: "inference test" + + - match: { error.type: "search_phase_execution_exception" } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Field [non_inference_field] of type [text] does not support semantic queries" } + +--- +"Query a missing field": + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: "inference test" + non_inference_field: "non inference test" + refresh: true + + - do: + search: + index: test-sparse-index + body: + query: + semantic: + field: "missing_field" + query: "inference test" + + - match: { hits.total.value: 0 } + +--- +"Query a missing index": + - do: + catch: missing + search: + index: missing-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { error.type: "index_not_found_exception" } + - match: { error.reason: "no such index [missing-index]" } + + - do: + search: + index: missing-index + ignore_unavailable: true + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { hits.total.value: 0 } + +--- +"Query multiple indices": + - do: + catch: bad_request + search: + index: + - test-sparse-index + - test-dense-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: 
"Field [inference_field] has multiple inference IDs associated with it" } + + # Test wildcard resolution + - do: + catch: bad_request + search: + index: test-* + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "Field [inference_field] has multiple inference IDs associated with it" } + + # Test querying an index alias that resolves to multiple indices + - do: + indices.put_alias: + index: + - test-sparse-index + - test-dense-index + name: my-alias + + - do: + catch: bad_request + search: + index: my-alias + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "Field [inference_field] has multiple inference IDs associated with it" } + + # Test querying multiple indices that use the same inference ID - this should work + - do: + indices.create: + index: test-sparse-index-2 + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + non_inference_field: + type: text + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: "inference test" + refresh: true + + - do: + index: + index: test-sparse-index-2 + id: doc_2 + body: + inference_field: "another inference test" + refresh: true + + - do: + search: + index: test-sparse-index* + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "doc_2" } + - match: { hits.hits.1._id: "doc_1" } + +--- +"Query a field that has no indexed inference results": + - skip: + features: [ "headers" ] + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-sparse-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { hits.total.value: 0 } + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-dense-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { hits.total.value: 0 } + +--- +"Query a field with an invalid inference ID": + - do: + indices.create: + index: test-index-with-invalid-inference-id + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: invalid-inference-id + + - do: + catch: missing + search: + index: test-index-with-invalid-inference-id + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { error.type: "resource_not_found_exception" } + - match: { error.reason: "Inference endpoint not found [invalid-inference-id]" } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/50_semantic_text_query_inference_endpoint_changes.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/50_semantic_text_query_inference_endpoint_changes.yml new file mode 100644 index 0000000000000..fd656c9d5d950 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/50_semantic_text_query_inference_endpoint_changes.yml @@ -0,0 +1,188 @@ +setup: + - requires: + cluster_features: "gte_v8.15.0" + reason: semantic_text introduced in 8.15.0 + + - do: + 
inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + inference.put: + task_type: text_embedding + inference_id: dense-inference-id + body: > + { + "service": "text_embedding_test_service", + "service_settings": { + "model": "my_model", + "dimensions": 10, + "api_key": "abc64", + "similarity": "COSINE" + }, + "task_settings": { + } + } + + - do: + indices.create: + index: test-sparse-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + non_inference_field: + type: text + + - do: + indices.create: + index: test-dense-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: dense-inference-id + non_inference_field: + type: text + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: "inference test" + non_inference_field: "non inference test" + refresh: true + + - do: + index: + index: test-dense-index + id: doc_1 + body: + inference_field: "inference test" + non_inference_field: "non inference test" + refresh: true +--- +"sparse_embedding changed to text_embedding": + - do: + inference.delete: + inference_id: sparse-inference-id + + - do: + inference.put: + task_type: text_embedding + inference_id: sparse-inference-id + body: > + { + "service": "text_embedding_test_service", + "service_settings": { + "model": "my_model", + "dimensions": 10, + "api_key": "abc64", + "similarity": "COSINE" + }, + "task_settings": { + } + } + + - do: + catch: bad_request + search: + index: test-sparse-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { error.caused_by.type: "illegal_argument_exception" } + - match: { error.caused_by.reason: "Field [inference_field] expected query inference results to be of type + [text_expansion_result], got [text_embedding_result]. Has the inference endpoint + configuration changed?" } + +--- +"text_embedding changed to sparse_embedding": + - do: + inference.delete: + inference_id: dense-inference-id + + - do: + inference.put: + task_type: sparse_embedding + inference_id: dense-inference-id + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + catch: bad_request + search: + index: test-dense-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { error.caused_by.type: "illegal_argument_exception" } + - match: { error.caused_by.reason: "Field [inference_field] expected query inference results to be of type + [text_embedding_result], got [text_expansion_result]. Has the inference endpoint + configuration changed?" 
} + +--- +"text_embedding dimension count changed": + - do: + inference.delete: + inference_id: dense-inference-id + + - do: + inference.put: + task_type: text_embedding + inference_id: dense-inference-id + body: > + { + "service": "text_embedding_test_service", + "service_settings": { + "model": "my_model", + "dimensions": 20, + "api_key": "abc64", + "similarity": "COSINE" + }, + "task_settings": { + } + } + + - do: + catch: bad_request + search: + index: test-dense-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { error.caused_by.type: "illegal_argument_exception" } + - match: { error.caused_by.reason: "Field [inference_field] expected query inference results with 10 dimensions, got + 20 dimensions. Has the inference endpoint configuration changed?" } diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java index 7a724ee202c37..6d38729f36be2 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.MockUtils; import org.elasticsearch.test.client.NoOpClient; import org.elasticsearch.threadpool.ThreadPool; @@ -52,9 +52,6 @@ public class TransportGetPipelineActionTests extends ESTestCase { * a TransportGetPipelineAction. 
*/ public void testGetPipelineMultipleIDsPartialFailure() throws Exception { - // Set up a log appender for detecting log messages - final MockLogAppender mockLogAppender = new MockLogAppender(); - // Set up a MultiGetResponse GetResponse mockResponse = mock(GetResponse.class); when(mockResponse.getId()).thenReturn("1"); @@ -66,9 +63,9 @@ public void testGetPipelineMultipleIDsPartialFailure() throws Exception { new MultiGetItemResponse[] { new MultiGetItemResponse(mockResponse, null), new MultiGetItemResponse(null, failure) } ); - try (var threadPool = createThreadPool(); var ignored = mockLogAppender.capturing(TransportGetPipelineAction.class)) { - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var threadPool = createThreadPool(); var mockLog = MockLog.capture(TransportGetPipelineAction.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "message", "org.elasticsearch.xpack.logstash.action.TransportGetPipelineAction", Level.INFO, @@ -89,7 +86,7 @@ public void onResponse(GetPipelineResponse getPipelineResponse) { assertThat(getPipelineResponse.pipelines().size(), equalTo(1)); // check that failed pipeline get is logged - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 1b5951ffdb0e0..57eba5816d3cd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -1475,7 +1475,7 @@ public List getRestHandlers( restHandlers.add(new RestCatDataFrameAnalyticsAction()); } if (machineLearningExtension.get().isNlpEnabled()) { - restHandlers.add(new RestStartTrainedModelDeploymentAction()); + restHandlers.add(new RestStartTrainedModelDeploymentAction(machineLearningExtension.get().disableInferenceProcessCache())); restHandlers.add(new RestStopTrainedModelDeploymentAction()); restHandlers.add(new RestInferTrainedModelDeploymentAction()); restHandlers.add(new RestUpdateTrainedModelDeploymentAction()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java index c27568c6e3b5c..0f8024dd7207a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java @@ -29,6 +29,10 @@ default boolean isLearningToRankEnabled() { return false; } + default boolean disableInferenceProcessCache() { + return false; + } + String[] getAnalyticsDestIndexAllowedSettings(); AbstractNodeAvailabilityZoneMapper getNodeAvailabilityZoneMapper(Settings settings, ClusterSettings clusterSettings); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java index 7d8567eb32f40..168a81ba554dc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java @@ -31,8 +31,6 @@ import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.IngestService; -import org.elasticsearch.ingest.Pipeline; -import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskInfo; @@ -44,13 +42,12 @@ import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; +import org.elasticsearch.xpack.ml.utils.InferenceProcessorInfoExtractor; import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -141,34 +138,6 @@ static void cancelDownloadTask(Client client, String modelId, ActionListener null, taskListener); } - static Set getReferencedModelKeys(IngestMetadata ingestMetadata, IngestService ingestService) { - Set allReferencedModelKeys = new HashSet<>(); - if (ingestMetadata == null) { - return allReferencedModelKeys; - } - for (Map.Entry entry : ingestMetadata.getPipelines().entrySet()) { - String pipelineId = entry.getKey(); - Map config = entry.getValue().getConfigAsMap(); - try { - Pipeline pipeline = Pipeline.create( - pipelineId, - config, - ingestService.getProcessorFactories(), - ingestService.getScriptService() - ); - pipeline.getProcessors() - .stream() - .filter(p -> p instanceof InferenceProcessor) - .map(p -> (InferenceProcessor) p) - .map(InferenceProcessor::getModelId) - .forEach(allReferencedModelKeys::add); - } catch (Exception ex) { - logger.warn(() -> "failed to load pipeline [" + pipelineId + "]", ex); - } - } - return allReferencedModelKeys; - } - static List getModelAliases(ClusterState clusterState, String modelId) { final ModelAliasMetadata currentMetadata = ModelAliasMetadata.fromState(clusterState); final List modelAliases = new ArrayList<>(); @@ -183,7 +152,7 @@ static List getModelAliases(ClusterState clusterState, String modelId) { private void deleteModel(DeleteTrainedModelAction.Request request, ClusterState state, ActionListener listener) { String id = request.getId(); IngestMetadata currentIngestMetadata = state.metadata().custom(IngestMetadata.TYPE); - Set referencedModels = getReferencedModelKeys(currentIngestMetadata, ingestService); + Set referencedModels = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(currentIngestMetadata); if (request.isForce() == false && referencedModels.contains(id)) { listener.onFailure( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java index fe8a4ff029d69..78ac4bc11d17a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java @@ -34,14 +34,13 @@ import org.elasticsearch.xpack.core.ml.action.DeleteTrainedModelAliasAction; import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; +import 
org.elasticsearch.xpack.ml.utils.InferenceProcessorInfoExtractor; import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.Set; -import static org.elasticsearch.xpack.ml.action.TransportDeleteTrainedModelAction.getReferencedModelKeys; - public class TransportDeleteTrainedModelAliasAction extends AcknowledgedTransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportDeleteTrainedModelAliasAction.class); @@ -113,7 +112,7 @@ static ClusterState deleteModelAlias( ); } IngestMetadata currentIngestMetadata = currentState.metadata().custom(IngestMetadata.TYPE); - Set referencedModels = getReferencedModelKeys(currentIngestMetadata, ingestService); + Set referencedModels = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(currentIngestMetadata); if (referencedModels.contains(request.getModelAlias())) { throw new ElasticsearchStatusException( "Cannot delete model_alias [{}] as it is still referenced by ingest processors", diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java index 5f1ec76ae2de2..10027cbd0d6bc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentClusterService; import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; +import org.elasticsearch.xpack.ml.utils.InferenceProcessorInfoExtractor; import java.util.List; import java.util.Objects; @@ -47,7 +48,6 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.ml.action.TransportDeleteTrainedModelAction.getModelAliases; -import static org.elasticsearch.xpack.ml.action.TransportDeleteTrainedModelAction.getReferencedModelKeys; /** * Class for transporting stop trained model deployment requests. @@ -123,7 +123,7 @@ protected void doExecute( } IngestMetadata currentIngestMetadata = state.metadata().custom(IngestMetadata.TYPE); - Set referencedModels = getReferencedModelKeys(currentIngestMetadata, ingestService); + Set referencedModels = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(currentIngestMetadata); if (request.isForce() == false) { if (referencedModels.contains(request.getId())) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java index 43e20a6581e07..deb645ff96133 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java @@ -768,7 +768,7 @@ public void clusterChanged(ClusterChangedEvent event) { ClusterState state = event.state(); IngestMetadata currentIngestMetadata = state.metadata().custom(IngestMetadata.TYPE); Set allReferencedModelKeys = event.changedCustomMetadataSet().contains(IngestMetadata.TYPE) - ? getReferencedModelKeys(currentIngestMetadata) + ? 
countInferenceProcessors(currentIngestMetadata)
             : new HashSet<>(referencedModels);
         Set<String> referencedModelsBeforeClusterState;
         Set<String> loadingModelBeforeClusterState = null;
@@ -975,7 +975,7 @@ private static <T> Queue<T> addFluently(Queue<T> queue, T object) {
         return queue;
     }

-    private static Set<String> getReferencedModelKeys(IngestMetadata ingestMetadata) {
+    private static Set<String> countInferenceProcessors(IngestMetadata ingestMetadata) {
         Set<String> allReferencedModelKeys = new HashSet<>();
         if (ingestMetadata == null) {
             return allReferencedModelKeys;
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentAction.java
index d3cb46d4e98bc..a7b679717c2a0 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentAction.java
@@ -36,6 +36,18 @@
 @ServerlessScope(Scope.PUBLIC)
 public class RestStartTrainedModelDeploymentAction extends BaseRestHandler {

+    public RestStartTrainedModelDeploymentAction(boolean disableInferenceProcessCache) {
+        super();
+        if (disableInferenceProcessCache) {
+            this.defaultCacheSize = ByteSizeValue.ZERO;
+        } else {
+            // Don't set the default cache size yet
+            defaultCacheSize = null;
+        }
+    }
+
+    private final ByteSizeValue defaultCacheSize;
+
     @Override
     public String getName() {
         return "xpack_ml_start_trained_models_deployment_action";
@@ -98,6 +110,8 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient
             request.setCacheSize(
                 ByteSizeValue.parseBytesSizeValue(restRequest.param(CACHE_SIZE.getPreferredName()), CACHE_SIZE.getPreferredName())
             );
+        } else if (defaultCacheSize != null) {
+            request.setCacheSize(defaultCacheSize);
         }
         request.setQueueCapacity(restRequest.paramAsInt(QUEUE_CAPACITY.getPreferredName(), request.getQueueCapacity()));
         request.setPriority(
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractor.java
index 5a2f044d1f7be..fe16cc4c59bed 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractor.java
@@ -70,6 +70,35 @@ public static int countInferenceProcessors(ClusterState state) {
         return (int) counter.get();
     }

+    /**
+     * @param ingestMetadata The ingestMetadata of current ClusterState
+     * @return The set of model IDs referenced by inference processors
+     */
+    @SuppressWarnings("unchecked")
+    public static Set<String> getModelIdsFromInferenceProcessors(IngestMetadata ingestMetadata) {
+        if (ingestMetadata == null) {
+            return Set.of();
+        }
+
+        Set<String> modelIds = new LinkedHashSet<>();
+        ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> {
+            Map<String, Object> configMap = configuration.getConfigAsMap();
+            List<Map<String, Object>> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY);
+            for (Map<String, Object> processorConfigWithKey : processorConfigs) {
+                for (Map.Entry<String, Object> entry : processorConfigWithKey.entrySet()) {
+                    addModelsAndPipelines(
+                        entry.getKey(),
+                        pipelineId,
+                        (Map<String, Object>) entry.getValue(),
+                        pam -> modelIds.add(pam.modelIdOrAlias()),
+                        0
+                    );
+                }
+            }
+        });
+        return modelIds;
+    }
+
     /**
      * @param state Current 
cluster state * @return a map from Model or Deployment IDs or Aliases to each pipeline referencing them. diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java index fb8b1a31508fc..70b63b8872d8f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java @@ -11,8 +11,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; -import org.elasticsearch.test.MockLogAppender.LoggingExpectation; +import org.elasticsearch.test.MockLog; +import org.elasticsearch.test.MockLog.LoggingExpectation; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -106,41 +106,20 @@ public void testThrottlingSummary() throws IllegalAccessException, TimeoutExcept ).getBytes(StandardCharsets.UTF_8) ); - MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "test1", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 1" - ) - ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + executeLoggingTest( + is, + Level.INFO, + "test_throttling", + new MockLog.SeenEventExpectation("test1", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 1"), + new MockLog.SeenEventExpectation( "test2", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 1 | repeated [5]" - ) - ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "test3", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 4" - ) - ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "test4", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 5" - ) + ), + new MockLog.SeenEventExpectation("test3", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 4"), + new MockLog.SeenEventExpectation("test4", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 5") ); - - executeLoggingTest(is, Level.INFO, "test_throttling"); } public void testThrottlingSummaryOneRepeat() throws IllegalAccessException, TimeoutException, IOException { @@ -160,30 +139,15 @@ public void testThrottlingSummaryOneRepeat() throws IllegalAccessException, Time is, Level.INFO, "test_throttling", - new MockLogAppender.SeenEventExpectation( - "test1", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 1" - ), - new MockLogAppender.UnseenEventExpectation( + new MockLog.SeenEventExpectation("test1", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 1"), + new MockLog.UnseenEventExpectation( "test2", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 1 | repeated [1]" ), - new MockLogAppender.SeenEventExpectation( - "test1", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 4" - ), - new MockLogAppender.SeenEventExpectation( - "test2", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 5" - ) + new 
MockLog.SeenEventExpectation("test1", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 4"), + new MockLog.SeenEventExpectation("test2", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 5") ); } @@ -210,42 +174,22 @@ public void testThrottlingSummaryLevelChanges() throws IllegalAccessException, T is, Level.INFO, "test_throttling", - new MockLogAppender.SeenEventExpectation( - "test1", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 1" - ), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation("test1", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 1"), + new MockLog.SeenEventExpectation( "test2", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 1 | repeated [2]" ), - new MockLogAppender.SeenEventExpectation( - "test3", - CppLogMessageHandler.class.getName(), - Level.ERROR, - "[test_throttling] * message 3" - ), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation("test3", CppLogMessageHandler.class.getName(), Level.ERROR, "[test_throttling] * message 3"), + new MockLog.SeenEventExpectation( "test4", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 1 | repeated [3]" ), - new MockLogAppender.SeenEventExpectation( - "test5", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 4" - ), - new MockLogAppender.SeenEventExpectation( - "test6", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 5" - ) + new MockLog.SeenEventExpectation("test5", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 4"), + new MockLog.SeenEventExpectation("test6", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 5") ); } @@ -267,13 +211,8 @@ public void testThrottlingLastMessageRepeast() throws IllegalAccessException, Ti is, Level.INFO, "test_throttling", - new MockLogAppender.SeenEventExpectation( - "test1", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 1" - ), - new MockLogAppender.SeenEventExpectation( + new MockLog.SeenEventExpectation("test1", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 1"), + new MockLog.SeenEventExpectation( "test2", CppLogMessageHandler.class.getName(), Level.INFO, @@ -301,19 +240,9 @@ public void testThrottlingDebug() throws IllegalAccessException, TimeoutExceptio is, Level.DEBUG, "test_throttling", - new MockLogAppender.SeenEventExpectation( - "test1", - CppLogMessageHandler.class.getName(), - Level.INFO, - "[test_throttling] * message 1" - ), - new MockLogAppender.SeenEventExpectation( - "test2", - CppLogMessageHandler.class.getName(), - Level.DEBUG, - "[test_throttling] * message 6" - ), - new MockLogAppender.UnseenEventExpectation( + new MockLog.SeenEventExpectation("test1", CppLogMessageHandler.class.getName(), Level.INFO, "[test_throttling] * message 1"), + new MockLog.SeenEventExpectation("test2", CppLogMessageHandler.class.getName(), Level.DEBUG, "[test_throttling] * message 6"), + new MockLog.UnseenEventExpectation( "test3", CppLogMessageHandler.class.getName(), Level.INFO, @@ -359,7 +288,7 @@ private static void executeLoggingTest(InputStream is, Level level, String jobId Logger cppMessageLogger = LogManager.getLogger(CppLogMessageHandler.class); Level oldLevel = cppMessageLogger.getLevel(); - MockLogAppender.assertThatLogger(() -> { + 
MockLog.assertThatLogger(() -> { Loggers.setLevel(cppMessageLogger, level); try (CppLogMessageHandler handler = new CppLogMessageHandler(jobId, is)) { handler.tailStream(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java new file mode 100644 index 0000000000000..26f877a110dc4 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.rest.inference; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.RestActionTestCase; +import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; +import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentTests; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class RestStartTrainedModelDeploymentActionTests extends RestActionTestCase { + + public void testCacheDisabled() { + final boolean disableInferenceProcessCache = true; + controller().registerHandler(new RestStartTrainedModelDeploymentAction(disableInferenceProcessCache)); + SetOnce executeCalled = new SetOnce<>(); + verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> { + assertThat(actionRequest, instanceOf(StartTrainedModelDeploymentAction.Request.class)); + + var request = (StartTrainedModelDeploymentAction.Request) actionRequest; + assertThat(request.getCacheSize(), is(ByteSizeValue.ZERO)); + + executeCalled.set(true); + return createResponse(); + })); + + RestRequest inferenceRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) + .withPath("_ml/trained_models/test_id/deployment/_start") + .build(); + dispatchRequest(inferenceRequest); + assertThat(executeCalled.get(), equalTo(true)); + } + + public void testCacheEnabled() { + final boolean disableInferenceProcessCache = false; + controller().registerHandler(new RestStartTrainedModelDeploymentAction(disableInferenceProcessCache)); + SetOnce executeCalled = new SetOnce<>(); + verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> { + assertThat(actionRequest, instanceOf(StartTrainedModelDeploymentAction.Request.class)); + + var request = (StartTrainedModelDeploymentAction.Request) actionRequest; + assertNull(request.getCacheSize()); + + executeCalled.set(true); + return createResponse(); + })); + + RestRequest inferenceRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) + .withPath("_ml/trained_models/test_id/deployment/_start") + .build(); + dispatchRequest(inferenceRequest); + assertThat(executeCalled.get(), equalTo(true)); + } + + private static CreateTrainedModelAssignmentAction.Response createResponse() { + return new 
CreateTrainedModelAssignmentAction.Response(TrainedModelAssignmentTests.randomInstance());
    }
}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractorTests.java
index f7b8b8a0967f9..488c974cc27bb 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractorTests.java
@@ -64,6 +64,30 @@ public void testPipelineIdsByModelIds() throws IOException {
         );
     }

+    public void testGetModelIdsFromInferenceProcessors() throws IOException {
+        String modelId1 = "trained_model_1";
+        String modelId2 = "trained_model_2";
+        String modelId3 = "trained_model_3";
+        Set<String> expectedModelIds = new HashSet<>(Arrays.asList(modelId1, modelId2, modelId3));
+
+        ClusterState clusterState = buildClusterStateWithModelReferences(2, modelId1, modelId2, modelId3);
+        IngestMetadata ingestMetadata = clusterState.metadata().custom(IngestMetadata.TYPE);
+        Set<String> actualModelIds = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(ingestMetadata);
+
+        assertThat(actualModelIds, equalTo(expectedModelIds));
+    }
+
+    public void testGetModelIdsFromInferenceProcessorsWhenNull() throws IOException {
+
+        Set<String> expectedModelIds = new HashSet<>(Arrays.asList());
+
+        ClusterState clusterState = buildClusterStateWithModelReferences(0);
+        IngestMetadata ingestMetadata = clusterState.metadata().custom(IngestMetadata.TYPE);
+        Set<String> actualModelIds = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(ingestMetadata);
+
+        assertThat(actualModelIds, equalTo(expectedModelIds));
+    }
+
     public void testNumInferenceProcessors() throws IOException {
         assertThat(InferenceProcessorInfoExtractor.countInferenceProcessors(buildClusterState(null)), equalTo(0));
         assertThat(InferenceProcessorInfoExtractor.countInferenceProcessors(buildClusterState(Metadata.EMPTY_METADATA)), equalTo(0));
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java
index 152f7c0ea9a73..7615723860cff 100644
--- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 import org.elasticsearch.repositories.fs.FsRepository;
 import org.elasticsearch.search.SearchResponseUtils;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest;

 import java.util.List;
@@ -64,10 +64,9 @@ public void testSearchableSnapshotRelocationDoNotUseSnapshotBasedRecoveries() th
         final var newNode = internalCluster().startDataOnlyNode();

-        final var mockAppender = new MockLogAppender();
-        try (var ignored = mockAppender.capturing(ShardSnapshotsService.class)) {
-            mockAppender.addExpectation(
-                new 
MockLogAppender.UnseenEventExpectation( + try (var mockLog = MockLog.capture(ShardSnapshotsService.class)) { + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "Error fetching segments file", ShardSnapshotsService.class.getCanonicalName(), Level.WARN, @@ -82,7 +81,7 @@ public void testSearchableSnapshotRelocationDoNotUseSnapshotBasedRecoveries() th assertHitCount(prepareSearch(restoredIndexName).setTrackTotalHits(true), totalHits.value); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index eaac1431bef22..04c5415f51815 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -63,7 +63,7 @@ import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.test.rest.FakeRestRequest; @@ -127,7 +127,7 @@ import static java.util.Collections.emptyMap; import static org.elasticsearch.test.LambdaMatchers.falseWith; import static org.elasticsearch.test.LambdaMatchers.trueWith; -import static org.elasticsearch.test.MockLogAppender.assertThatLogger; +import static org.elasticsearch.test.MockLog.assertThatLogger; import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.elasticsearch.xpack.security.operator.OperatorPrivileges.NOOP_OPERATOR_PRIVILEGES_SERVICE; import static org.elasticsearch.xpack.security.operator.OperatorPrivileges.OPERATOR_PRIVILEGES_ENABLED; @@ -786,20 +786,19 @@ public void testSecurityPluginInstallsRestHandlerInterceptorEvenIfSecurityIsDisa public void testSecurityRestHandlerInterceptorCanBeInstalled() throws IllegalAccessException { final Logger amLogger = LogManager.getLogger(ActionModule.class); Loggers.setLevel(amLogger, Level.DEBUG); - final MockLogAppender appender = new MockLogAppender(); Settings settings = Settings.builder().put("xpack.security.enabled", false).put("path.home", createTempDir()).build(); SettingsModule settingsModule = new SettingsModule(Settings.EMPTY); ThreadPool threadPool = new TestThreadPool(getTestName()); - try (var ignored = appender.capturing(ActionModule.class)) { + try (var mockLog = MockLog.capture(ActionModule.class)) { UsageService usageService = new UsageService(); Security security = new Security(settings); // Verify Security rest interceptor is about to be installed // We will throw later if another interceptor is already installed - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "Security rest interceptor", ActionModule.class.getName(), Level.DEBUG, @@ -828,7 +827,7 @@ public void testSecurityRestHandlerInterceptorCanBeInstalled() throws IllegalAcc ); actionModule.initRestHandlers(null, null); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { threadPool.shutdown(); } @@ -838,7 +837,6 @@ public void testSecurityStatusMessageInLog() throws Exception { final Logger mockLogger = 
LogManager.getLogger(Security.class); boolean securityEnabled = true; Loggers.setLevel(mockLogger, Level.INFO); - final MockLogAppender appender = new MockLogAppender(); Settings.Builder settings = Settings.builder().put("path.home", createTempDir()); if (randomBoolean()) { @@ -847,9 +845,9 @@ public void testSecurityStatusMessageInLog() throws Exception { settings.put("xpack.security.enabled", securityEnabled); } - try (var ignored = appender.capturing(Security.class)) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(Security.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "message", Security.class.getName(), Level.INFO, @@ -857,7 +855,7 @@ public void testSecurityStatusMessageInLog() throws Exception { ) ); createComponents(settings.build()); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } @@ -1132,8 +1130,8 @@ private String randomNonFipsCompliantStoredHash() { ); } - private MockLogAppender.SeenEventExpectation logEventForNonCompliantCacheHash(String settingKey) { - return new MockLogAppender.SeenEventExpectation( + private MockLog.SeenEventExpectation logEventForNonCompliantCacheHash(String settingKey) { + return new MockLog.SeenEventExpectation( "cache hash not fips compliant", Security.class.getName(), Level.WARN, @@ -1144,8 +1142,8 @@ private MockLogAppender.SeenEventExpectation logEventForNonCompliantCacheHash(St ); } - private MockLogAppender.SeenEventExpectation logEventForNonCompliantStoredHash(String settingKey) { - return new MockLogAppender.SeenEventExpectation( + private MockLog.SeenEventExpectation logEventForNonCompliantStoredHash(String settingKey) { + return new MockLog.SeenEventExpectation( "stored hash not fips compliant", Security.class.getName(), Level.WARN, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java index 1a6ed48efe1d0..c01fc3480ed95 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; @@ -59,12 +59,11 @@ public void init() throws Exception { } public void testLogWhenLicenseProhibitsAuditing() throws Exception { - MockLogAppender mockLogAppender = new MockLogAppender(); - try (var ignored = mockLogAppender.capturing(AuditTrailService.class)) { + try (var mockLog = MockLog.capture(AuditTrailService.class)) { when(licenseState.getOperationMode()).thenReturn(randomFrom(License.OperationMode.values())); if (isAuditingAllowed) { - mockLogAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "audit disabled because of license", AuditTrailService.class.getName(), Level.WARN, @@ -74,8 +73,8 @@ public void testLogWhenLicenseProhibitsAuditing() throws Exception { ) ); } else { - 
mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "audit disabled because of license", AuditTrailService.class.getName(), Level.WARN, @@ -89,16 +88,15 @@ public void testLogWhenLicenseProhibitsAuditing() throws Exception { service.get(); } - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } public void testNoLogRecentlyWhenLicenseProhibitsAuditing() throws Exception { - MockLogAppender mockLogAppender = new MockLogAppender(); - try (var ignored = mockLogAppender.capturing(AuditTrailService.class)) { + try (var mockLog = MockLog.capture(AuditTrailService.class)) { service.nextLogInstantAtomic.set(randomFrom(Instant.now().minus(Duration.ofMinutes(5)), Instant.now())); - mockLogAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "audit disabled because of license", AuditTrailService.class.getName(), Level.WARN, @@ -108,7 +106,7 @@ public void testNoLogRecentlyWhenLicenseProhibitsAuditing() throws Exception { for (int i = 1; i <= randomIntBetween(2, 6); i++) { service.get(); } - mockLogAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 0871e2568d225..44d0543cc7831 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -65,7 +65,7 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.threadpool.FixedExecutorBuilder; @@ -1498,19 +1498,18 @@ public void testApiKeyAuthCacheWillTraceLogOnEvictionDueToCacheSize() throws Ill IntStream.range(0, cacheSize).forEach(i -> apiKeyAuthCache.put(idPrefix + count.incrementAndGet(), new ListenableFuture<>())); final Logger logger = LogManager.getLogger(ApiKeyService.class); Loggers.setLevel(logger, Level.TRACE); - final MockLogAppender appender = new MockLogAppender(); - try (var ignored = appender.capturing(ApiKeyService.class)) { - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + try (var mockLog = MockLog.capture(ApiKeyService.class)) { + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "evict", ApiKeyService.class.getName(), Level.TRACE, "API key with ID \\[" + idPrefix + "[0-9]+\\] was evicted from the authentication cache.*" ) ); - appender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "no-thrashing", ApiKeyService.class.getName(), Level.WARN, @@ -1518,10 +1517,10 @@ public void testApiKeyAuthCacheWillTraceLogOnEvictionDueToCacheSize() throws Ill ) ); apiKeyAuthCache.put(idPrefix + count.incrementAndGet(), new ListenableFuture<>()); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); - appender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + 
mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "replace", ApiKeyService.class.getName(), Level.TRACE, @@ -1529,10 +1528,10 @@ public void testApiKeyAuthCacheWillTraceLogOnEvictionDueToCacheSize() throws Ill ) ); apiKeyAuthCache.put(idPrefix + count.get(), new ListenableFuture<>()); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); - appender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "invalidate", ApiKeyService.class.getName(), Level.TRACE, @@ -1541,7 +1540,7 @@ public void testApiKeyAuthCacheWillTraceLogOnEvictionDueToCacheSize() throws Ill ); apiKeyAuthCache.invalidate(idPrefix + count.get(), new ListenableFuture<>()); apiKeyAuthCache.invalidateAll(); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { Loggers.setLevel(logger, Level.INFO); } @@ -1559,11 +1558,10 @@ public void testApiKeyCacheWillNotTraceLogOnEvictionDueToCacheTtl() throws Illeg final Logger logger = LogManager.getLogger(ApiKeyService.class); Loggers.setLevel(logger, Level.TRACE); - final MockLogAppender appender = new MockLogAppender(); - try (var ignored = appender.capturing(ApiKeyService.class)) { - appender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + try (var mockLog = MockLog.capture(ApiKeyService.class)) { + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "evict", ApiKeyService.class.getName(), Level.TRACE, @@ -1577,7 +1575,7 @@ public void testApiKeyCacheWillNotTraceLogOnEvictionDueToCacheTtl() throws Illeg // Cache a new entry apiKeyAuthCache.put(randomValueOtherThan(apiKeyId, () -> randomAlphaOfLength(22)), new ListenableFuture<>()); assertEquals(1, apiKeyAuthCache.count()); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { Loggers.setLevel(logger, Level.INFO); } @@ -1592,9 +1590,8 @@ public void testApiKeyAuthCacheWillLogWarningOnPossibleThrashing() throws Except apiKeyAuthCache.put(randomAlphaOfLength(21), new ListenableFuture<>()); final Logger logger = LogManager.getLogger(ApiKeyService.class); Loggers.setLevel(logger, Level.TRACE); - final MockLogAppender appender = new MockLogAppender(); - try (var ignored = appender.capturing(ApiKeyService.class)) { + try (var mockLog = MockLog.capture(ApiKeyService.class)) { // Prepare the warning logging to trigger service.getEvictionCounter().add(4500); final long thrashingCheckIntervalInSeconds = 300L; @@ -1606,16 +1603,16 @@ public void testApiKeyAuthCacheWillLogWarningOnPossibleThrashing() throws Except service.getLastEvictionCheckedAt().set(lastCheckedAt); // Ensure the counter is updated assertBusy(() -> assertThat(service.getEvictionCounter().longValue() >= 4500, is(true))); - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "evict", ApiKeyService.class.getName(), Level.TRACE, "API key with ID [*] was evicted from the authentication cache*" ) ); - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "thrashing", ApiKeyService.class.getName(), Level.WARN, @@ -1623,23 +1620,23 @@ public void testApiKeyAuthCacheWillLogWarningOnPossibleThrashing() throws Except ) ); apiKeyAuthCache.put(randomAlphaOfLength(22), new ListenableFuture<>()); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); // Counter and timer should be reset 
assertThat(service.getLastEvictionCheckedAt().get(), lessThanOrEqualTo(System.nanoTime())); assertBusy(() -> assertThat(service.getEvictionCounter().longValue(), equalTo(0L))); // Will not log warning again for the next eviction because of throttling - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "evict-again", ApiKeyService.class.getName(), Level.TRACE, "API key with ID [*] was evicted from the authentication cache*" ) ); - appender.addExpectation( - new MockLogAppender.UnseenEventExpectation( + mockLog.addExpectation( + new MockLog.UnseenEventExpectation( "throttling", ApiKeyService.class.getName(), Level.WARN, @@ -1647,7 +1644,7 @@ public void testApiKeyAuthCacheWillLogWarningOnPossibleThrashing() throws Except ) ); apiKeyAuthCache.put(randomAlphaOfLength(23), new ListenableFuture<>()); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { Loggers.setLevel(logger, Level.INFO); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 68e703fb26e26..42de8d014edf2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -53,7 +53,7 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; @@ -417,11 +417,9 @@ public void testTokenFirstMissingSecondFound() throws Exception { } public void testTokenMissing() throws Exception { - final MockLogAppender mockAppender = new MockLogAppender(); - - try (var ignored = mockAppender.capturing(RealmsAuthenticator.class)) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(RealmsAuthenticator.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "unlicensed realms", RealmsAuthenticator.class.getName(), Level.WARN, @@ -456,7 +454,7 @@ public void testTokenMissing() throws Exception { verify(auditTrail).anonymousAccessDenied(reqId.get(), "_action", transportRequest); } verifyNoMoreInteractions(auditTrail); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); setCompletedToTrue(completed); }); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticatorChainTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticatorChainTests.java index 82b0a06a6dc52..4517b639b7604 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticatorChainTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticatorChainTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import 
org.elasticsearch.xpack.core.security.action.apikey.ApiKey; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; @@ -480,11 +480,10 @@ public void testRunAsIsIgnoredForUnsupportedAuthenticationTypes() throws Illegal final Logger logger = LogManager.getLogger(AuthenticatorChain.class); Loggers.setLevel(logger, Level.INFO); - final MockLogAppender appender = new MockLogAppender(); - try (var ignored = appender.capturing(AuthenticatorChain.class)) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(AuthenticatorChain.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "run-as", AuthenticatorChain.class.getName(), Level.INFO, @@ -494,7 +493,7 @@ public void testRunAsIsIgnoredForUnsupportedAuthenticationTypes() throws Illegal final PlainActionFuture future = new PlainActionFuture<>(); authenticatorChain.maybeLookupRunAsUser(context, authentication, future); assertThat(future.actionGet(), equalTo(authentication)); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { Loggers.setLevel(logger, Level.INFO); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java index 1c2c617a46bb7..b35a2f8ccc4d3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Tuple; import org.elasticsearch.telemetry.TestTelemetryPlugin; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; @@ -205,10 +205,9 @@ public void testNullUser() throws IllegalAccessException { final ElasticsearchSecurityException e = new ElasticsearchSecurityException("fail"); when(request.authenticationFailed(authenticationToken)).thenReturn(e); - final MockLogAppender mockAppender = new MockLogAppender(); - try (var ignored = mockAppender.capturing(RealmsAuthenticator.class)) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(RealmsAuthenticator.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "unlicensed realms", RealmsAuthenticator.class.getName(), Level.WARN, @@ -219,7 +218,7 @@ public void testNullUser() throws IllegalAccessException { final PlainActionFuture> future = new PlainActionFuture<>(); realmsAuthenticator.authenticate(context, future); assertThat(expectThrows(ElasticsearchSecurityException.class, future::actionGet), is(e)); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index 3fa0c8a52b65f..b66b035cec447 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -935,13 +935,12 @@ public void testRealmsAreDisabledOnLicenseDowngrade() throws Exception { verify(licenseState).enableUsageTracking(Security.CUSTOM_REALMS_FEATURE, "custom_realm_2"); final Logger realmsLogger = LogManager.getLogger(Realms.class); - final MockLogAppender appender = new MockLogAppender(); when(licenseState.statusDescription()).thenReturn("mock license"); - try (var ignored = appender.capturing(Realms.class)) { + try (var mockLog = MockLog.capture(Realms.class)) { for (String realmId : List.of("kerberos.kerberos_realm", "type_0.custom_realm_1", "type_1.custom_realm_2")) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "Realm [" + realmId + "] disabled", realmsLogger.getName(), Level.WARN, @@ -950,7 +949,7 @@ public void testRealmsAreDisabledOnLicenseDowngrade() throws Exception { ); } allowOnlyStandardRealms(); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } final List unlicensedRealmNames = realms.getUnlicensedRealms().stream().map(r -> r.name()).collect(Collectors.toList()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java index 7fa3ee96de469..f839e5e7c1dcb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java @@ -77,7 +77,7 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.TestMatchers; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings; @@ -970,7 +970,6 @@ public void testHandleUserinfoResponseFailure() throws Exception { public void testLogIdTokenAndNonce() throws URISyntaxException, BadJOSEException, JOSEException, IllegalAccessException { final Logger logger = LogManager.getLogger(OpenIdConnectAuthenticator.class); - final MockLogAppender appender = new MockLogAppender(); Loggers.setLevel(logger, Level.DEBUG); final RealmConfig config = buildConfig(getBasicRealmSettings().build(), threadContext); @@ -997,12 +996,12 @@ public void testLogIdTokenAndNonce() throws URISyntaxException, BadJOSEException final Nonce expectedNonce = new Nonce(randomAlphaOfLength(10)); - try (var ignored = appender.capturing(OpenIdConnectAuthenticator.class)) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation("JWT header", logger.getName(), Level.DEBUG, "ID Token Header: " + 
headerString) + try (var mockLog = MockLog.capture(OpenIdConnectAuthenticator.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation("JWT header", logger.getName(), Level.DEBUG, "ID Token Header: " + headerString) ); - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "JWT exception", logger.getName(), Level.DEBUG, @@ -1014,7 +1013,7 @@ public void testLogIdTokenAndNonce() throws URISyntaxException, BadJOSEException final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); assertThat(e.getCause(), is(joseException)); // The logging message assertion is the only thing we actually care in this test - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { Loggers.setLevel(logger, (Level) null); openIdConnectAuthenticator.close(); @@ -1058,12 +1057,11 @@ public void testHttpClientConnectionTtlBehaviour() throws URISyntaxException, Il // In addition, capture logs to show that kept alive (TTL) is honored final Logger logger = LogManager.getLogger(PoolingNHttpClientConnectionManager.class); - final MockLogAppender appender = new MockLogAppender(); // Note: Setting an org.apache.http logger to DEBUG requires es.insecure_network_trace_enabled=true Loggers.setLevel(logger, Level.DEBUG); - try (var ignored = appender.capturing(PoolingNHttpClientConnectionManager.class)) { - appender.addExpectation( - new MockLogAppender.PatternSeenEventExpectation( + try (var mockLog = MockLog.capture(PoolingNHttpClientConnectionManager.class)) { + mockLog.addExpectation( + new MockLog.PatternSeenEventExpectation( "log", logger.getName(), Level.DEBUG, @@ -1092,7 +1090,7 @@ public void cancelled() { latch.await(); Thread.sleep(1500); } - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); assertThat(portTested.get(), is(true)); } finally { Loggers.setLevel(logger, (Level) null); @@ -1202,11 +1200,10 @@ public Object next() { authenticator = new OpenIdConnectAuthenticator(config, getOpConfig(), getDefaultRpConfig(), new SSLService(env), null); final Logger logger = LogManager.getLogger(OpenIdConnectAuthenticator.class); - final MockLogAppender appender = new MockLogAppender(); Loggers.setLevel(logger, Level.DEBUG); - try (var ignored = appender.capturing(OpenIdConnectAuthenticator.class)) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(OpenIdConnectAuthenticator.class)) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "log", logger.getName(), Level.DEBUG, @@ -1215,7 +1212,7 @@ public Object next() { ); final ConnectionKeepAliveStrategy keepAliveStrategy = authenticator.getKeepAliveStrategy(); assertThat(keepAliveStrategy.getKeepAliveDuration(httpResponse, null), equalTo(effectiveTtlInMs)); - appender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } finally { Loggers.setLevel(logger, (Level) null); authenticator.close(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java index ad836b8131934..16c7b39fa6953 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.util.NamedFormatter; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.MockLog; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.hamcrest.Matchers; import org.junit.Before; @@ -215,10 +215,9 @@ private void testLoggingWarnOnSpecialAttributeName(String attributeName, String .add(getAttribute(attributeName, attributeFriendlyName, null, List.of("daredevil"))); SamlToken token = token(signResponse(response)); - final MockLogAppender mockAppender = new MockLogAppender(); - try (var ignored = mockAppender.capturing(authenticator.getClass())) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(authenticator.getClass())) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "attribute name warning", authenticator.getClass().getName(), Level.WARN, @@ -227,7 +226,7 @@ private void testLoggingWarnOnSpecialAttributeName(String attributeName, String ); final SamlAttributes attributes = authenticator.authenticate(token); assertThat(attributes, notNullValue()); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } @@ -240,11 +239,10 @@ public void testLoggingNoLogIfNotSpecialAttributeName() throws Exception { assertion.getAttributeStatements().get(0).getAttributes().add(getAttribute(UID_OID, "friendly", null, List.of("daredevil"))); SamlToken token = token(signResponse(response)); - final MockLogAppender mockAppender = new MockLogAppender(); - try (var ignored = mockAppender.capturing(authenticator.getClass())) { + try (var mockLog = MockLog.capture(authenticator.getClass())) { final SamlAttributes attributes = authenticator.authenticate(token); assertThat(attributes, notNullValue()); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } @@ -261,18 +259,17 @@ public void testLoggingWarnOnSpecialAttributeNameInNameAndFriendlyName() throws SamlToken token = token(signResponse(response)); final Logger samlLogger = LogManager.getLogger(authenticator.getClass()); - final MockLogAppender mockAppender = new MockLogAppender(); - try (var ignored = mockAppender.capturing(authenticator.getClass())) { - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + try (var mockLog = MockLog.capture(authenticator.getClass())) { + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "attribute name warning", authenticator.getClass().getName(), Level.WARN, SPECIAL_ATTRIBUTE_LOG_MESSAGE ) ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( + mockLog.addExpectation( + new MockLog.SeenEventExpectation( "attribute friendly name warning", authenticator.getClass().getName(), Level.WARN, @@ -281,7 +278,7 @@ public void testLoggingWarnOnSpecialAttributeNameInNameAndFriendlyName() throws ); final SamlAttributes attributes = authenticator.authenticate(token); assertThat(attributes, notNullValue()); - mockAppender.assertAllExpectationsMatched(); + mockLog.assertAllExpectationsMatched(); } } @@ -866,10 +863,9 @@ public void testLoggingWhenAudienceCheckFails() throws Exception { String xml = SamlUtils.getXmlContent(response, false); final SamlToken token = token(signResponse(xml)); - final MockLogAppender mockAppender = new MockLogAppender(); - try (var 
ignored = mockAppender.capturing(authenticator.getClass())) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(authenticator.getClass())) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "similar audience",
                     authenticator.getClass().getName(),
                     Level.INFO,
@@ -882,8 +878,8 @@ public void testLoggingWhenAudienceCheckFails() throws Exception {
                         + "] [:80/] vs [/])"
                 )
             );
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "not similar audience",
                     authenticator.getClass().getName(),
                     Level.INFO,
@@ -892,7 +888,7 @@ public void testLoggingWhenAudienceCheckFails() throws Exception {
             );
             final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token));
             assertThat(exception.getMessage(), containsString("required audience"));
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }

diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java
index 1529fda3d6578..c66f3168c7b7d 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java
@@ -20,7 +20,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.core.security.action.service.CreateServiceAccountTokenRequest;
@@ -111,16 +111,10 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
         final Logger sasLogger = LogManager.getLogger(ServiceAccountService.class);
         Loggers.setLevel(sasLogger, Level.TRACE);

-        final MockLogAppender satAppender = new MockLogAppender();
-        final MockLogAppender sasAppender = new MockLogAppender();
-
-        try (
-            var ignored1 = satAppender.capturing(ServiceAccountToken.class);
-            var ignored2 = sasAppender.capturing(ServiceAccountService.class)
-        ) {
+        try (var satMockLog = MockLog.capture(ServiceAccountToken.class); var sasMockLog = MockLog.capture(ServiceAccountService.class)) {
             // Less than 4 bytes
-            satAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            satMockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "less than 4 bytes",
                     ServiceAccountToken.class.getName(),
                     Level.TRACE,
@@ -129,11 +123,11 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
             );
             final SecureString bearerString0 = createBearerString(List.of(Arrays.copyOfRange(magicBytes, 0, randomIntBetween(0, 3))));
             assertNull(ServiceAccountService.tryParseToken(bearerString0));
-            satAppender.assertAllExpectationsMatched();
+            satMockLog.assertAllExpectationsMatched();

             // Prefix mismatch
-            satAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            satMockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "prefix mismatch",
                     ServiceAccountToken.class.getName(),
                     Level.TRACE,
@@ -147,11 +141,11 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
                 )
             );
             assertNull(ServiceAccountService.tryParseToken(bearerString1));
-            satAppender.assertAllExpectationsMatched();
+            satMockLog.assertAllExpectationsMatched();

             // No colon
-            satAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            satMockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "no colon",
                     ServiceAccountToken.class.getName(),
                     Level.TRACE,
@@ -162,11 +156,11 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
                 List.of(magicBytes, randomAlphaOfLengthBetween(30, 50).getBytes(StandardCharsets.UTF_8))
             );
             assertNull(ServiceAccountService.tryParseToken(bearerString2));
-            satAppender.assertAllExpectationsMatched();
+            satMockLog.assertAllExpectationsMatched();

             // Invalid delimiter for qualified name
-            satAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            satMockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "invalid delimiter for qualified name",
                     ServiceAccountToken.class.getName(),
                     Level.TRACE,
@@ -194,11 +188,11 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
                 );
                 assertNull(ServiceAccountService.tryParseToken(bearerString3));
             }
-            satAppender.assertAllExpectationsMatched();
+            satMockLog.assertAllExpectationsMatched();

             // Invalid token name
-            sasAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            sasMockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "invalid token name",
                     ServiceAccountService.class.getName(),
                     Level.TRACE,
@@ -218,7 +212,7 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
                 )
             );
             assertNull(ServiceAccountService.tryParseToken(bearerString4));
-            sasAppender.assertAllExpectationsMatched();
+            sasMockLog.assertAllExpectationsMatched();

             // Everything is good
             final String namespace = randomAlphaOfLengthBetween(3, 8);
@@ -242,8 +236,8 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
             assertThat(parsedToken, equalTo(serviceAccountToken2));

             // Invalid magic byte
-            satAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            satMockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "invalid magic byte again",
                     ServiceAccountToken.class.getName(),
                     Level.TRACE,
@@ -253,11 +247,11 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
             );
             assertNull(
                 ServiceAccountService.tryParseToken(new SecureString("AQEAAWVsYXN0aWMvZmxlZXQvdG9rZW4xOnN1cGVyc2VjcmV0".toCharArray()))
             );
-            satAppender.assertAllExpectationsMatched();
+            satMockLog.assertAllExpectationsMatched();

             // No colon
-            satAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            satMockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "no colon again",
                     ServiceAccountToken.class.getName(),
                     Level.TRACE,
@@ -267,7 +261,11 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
             );
             assertNull(
                 ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXQvdG9rZW4xX3N1cGVyc2VjcmV0".toCharArray()))
             );
-            satAppender.assertAllExpectationsMatched();
+            satMockLog.assertAllExpectationsMatched();

             // Invalid qualified name
-            satAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            satMockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "invalid delimiter for qualified name again",
                     ServiceAccountToken.class.getName(),
                     Level.TRACE,
@@ -281,11 +275,11 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
             );
             assertNull(
                 ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXRfdG9rZW4xOnN1cGVyc2VjcmV0".toCharArray()))
             );
-            satAppender.assertAllExpectationsMatched();
+            satMockLog.assertAllExpectationsMatched();

             // Invalid token name
-            sasAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            sasMockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "invalid token name again",
                     ServiceAccountService.class.getName(),
                     Level.TRACE,
@@ -295,7 +289,7 @@ public void testTryParseToken() throws IOException, IllegalAccessException {
             assertNull(
                 ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXQvdG9rZW4hOnN1cGVyc2VjcmV0".toCharArray()))
             );
-            sasAppender.assertAllExpectationsMatched();
+            sasMockLog.assertAllExpectationsMatched();

             // everything is fine
             assertThat(
@@ -366,15 +360,14 @@ public void testAuthenticateWithToken() throws ExecutionException, InterruptedEx
         final Logger sasLogger = LogManager.getLogger(ServiceAccountService.class);
         Loggers.setLevel(sasLogger, Level.TRACE);

-        final MockLogAppender appender = new MockLogAppender();
-        try (var ignored = appender.capturing(ServiceAccountService.class)) {
+        try (var mockLog = MockLog.capture(ServiceAccountService.class)) {
             // non-elastic service account
             final ServiceAccountId accountId1 = new ServiceAccountId(
                 randomValueOtherThan(ElasticServiceAccounts.NAMESPACE, () -> randomAlphaOfLengthBetween(3, 8)),
                 randomAlphaOfLengthBetween(3, 8)
             );
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "non-elastic service account",
                     ServiceAccountService.class.getName(),
                     Level.DEBUG,
@@ -397,15 +390,15 @@ public void testAuthenticateWithToken() throws ExecutionException, InterruptedEx
                         + "]"
                 )
             );
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

             // Unknown elastic service name
             final ServiceAccountId accountId2 = new ServiceAccountId(
                 ElasticServiceAccounts.NAMESPACE,
                 randomValueOtherThan("fleet-server", () -> randomAlphaOfLengthBetween(3, 8))
             );
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "unknown elastic service name",
                     ServiceAccountService.class.getName(),
                     Level.DEBUG,
@@ -427,14 +420,14 @@ public void testAuthenticateWithToken() throws ExecutionException, InterruptedEx
                         + "]"
                 )
             );
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

             // Length of secret value is too short
             final ServiceAccountId accountId3 = new ServiceAccountId(ElasticServiceAccounts.NAMESPACE, "fleet-server");
             final SecureString secret3 = new SecureString(randomAlphaOfLengthBetween(1, 9).toCharArray());
             final ServiceAccountToken token3 = new ServiceAccountToken(accountId3, randomAlphaOfLengthBetween(3, 8), secret3);
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "secret value too short",
                     ServiceAccountService.class.getName(),
                     Level.DEBUG,
@@ -457,7 +450,7 @@ public void testAuthenticateWithToken() throws ExecutionException, InterruptedEx
                         + "]"
                 )
             );
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

             final TokenInfo.TokenSource tokenSource = randomFrom(TokenInfo.TokenSource.values());
             final CachingServiceAccountTokenStore store;
@@ -524,8 +517,8 @@ public void testAuthenticateWithToken() throws ExecutionException, InterruptedEx
                 )
             );

-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "invalid credential",
                     ServiceAccountService.class.getName(),
                     Level.DEBUG,
@@ -550,7 +543,7 @@ public void testAuthenticateWithToken() throws ExecutionException, InterruptedEx
                         + "]"
                 )
             );
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         } finally {
             Loggers.setLevel(sasLogger, Level.INFO);
         }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/LoadAuthorizedIndicesTimeCheckerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/LoadAuthorizedIndicesTimeCheckerTests.java
index 8295f028588cc..faf75e849260c 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/LoadAuthorizedIndicesTimeCheckerTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/LoadAuthorizedIndicesTimeCheckerTests.java
@@ -17,7 +17,7 @@
 import org.elasticsearch.common.settings.SettingsException;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper;
 import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine;
@@ -130,7 +130,7 @@ public void testWarning() throws Exception {
         );
         final int elapsedMs = warnMs + randomIntBetween(1, 100);

-        final MockLogAppender.PatternSeenEventExpectation expectation = new MockLogAppender.PatternSeenEventExpectation(
+        final MockLog.PatternSeenEventExpectation expectation = new MockLog.PatternSeenEventExpectation(
             "WARN-Slow Index Resolution",
             timerLogger.getName(),
             Level.WARN,
@@ -156,7 +156,7 @@ public void testInfo() throws Exception {
         );
         final int elapsedMs = infoMs + randomIntBetween(1, 100);

-        final MockLogAppender.PatternSeenEventExpectation expectation = new MockLogAppender.PatternSeenEventExpectation(
+        final MockLog.PatternSeenEventExpectation expectation = new MockLog.PatternSeenEventExpectation(
             "INFO-Slow Index Resolution",
             timerLogger.getName(),
             Level.INFO,
@@ -171,7 +171,7 @@ public void testInfo() throws Exception {
     private void testLogging(
         LoadAuthorizedIndicesTimeChecker.Thresholds thresholds,
         int elapsedMs,
-        MockLogAppender.PatternSeenEventExpectation expectation
+        MockLog.PatternSeenEventExpectation expectation
     ) throws IllegalAccessException {
         final User user = new User("slow-user", "slow-role");
         final Authentication authentication = AuthenticationTestHelper.builder()
@@ -192,11 +192,10 @@ private void testLogging(
             requestInfo,
             thresholds
         );
-        final MockLogAppender mockAppender = new MockLogAppender();
-        try (var ignored = mockAppender.capturing(timerLogger.getName())) {
-            mockAppender.addExpectation(expectation);
+        try (var mockLog = MockLog.capture(timerLogger.getName())) {
+            mockLog.addExpectation(expectation);
             checker.accept(List.of());
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }

diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
index fd32bde0f3c53..c137c7b00b678 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
@@ -45,7 +45,7 @@
 import org.elasticsearch.license.MockLicenseState;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.TransportVersionUtils;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportRequest;
@@ -296,10 +296,9 @@ public void testLoggingWarnWhenDlsUnlicensed() throws IOException, IllegalAccess
             effectiveRoleDescriptors::set
         );

-        final MockLogAppender mockAppender = new MockLogAppender();
-        try (var ignored = mockAppender.capturing(RoleDescriptorStore.class)) {
-            mockAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(RoleDescriptorStore.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "disabled role warning",
                     RoleDescriptorStore.class.getName(),
                     Level.WARN,
@@ -311,7 +310,7 @@ public void testLoggingWarnWhenDlsUnlicensed() throws IOException, IllegalAccess
             getRoleForRoleNames(compositeRolesStore, Collections.singleton("dls"), roleFuture);
             assertEquals(Role.EMPTY, roleFuture.actionGet());
             assertThat(effectiveRoleDescriptors.get(), empty());
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }

diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java
index a47d51ac2d1c2..aa95ea097413c 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java
@@ -21,7 +21,7 @@
 import org.elasticsearch.rest.RestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationField;
@@ -101,12 +101,11 @@ public void testMarkOperatorUser() throws IllegalAccessException {

         // Will mark for the operator user
         final Logger logger = LogManager.getLogger(OperatorPrivileges.class);
-        final MockLogAppender appender = new MockLogAppender();
         Loggers.setLevel(logger, Level.DEBUG);

-        try (var ignored = appender.capturing(OperatorPrivileges.class)) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(OperatorPrivileges.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "marking",
                     logger.getName(),
                     Level.DEBUG,
@@ -118,7 +117,7 @@ public void testMarkOperatorUser() throws IllegalAccessException {
                 AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR,
                 threadContext.getHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY)
             );
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         } finally {
             Loggers.setLevel(logger, (Level) null);
         }
@@ -210,13 +209,12 @@ public void testMaybeInterceptRequest() throws IllegalAccessException {
         when(xPackLicenseState.isAllowed(Security.OPERATOR_PRIVILEGES_FEATURE)).thenReturn(licensed);

         final Logger logger = LogManager.getLogger(OperatorPrivileges.class);
-        final MockLogAppender appender = new MockLogAppender();
         Loggers.setLevel(logger, Level.DEBUG);

-        try (var ignored = appender.capturing(OperatorPrivileges.class)) {
+        try (var mockLog = MockLog.capture(OperatorPrivileges.class)) {
             final RestoreSnapshotRequest restoreSnapshotRequest = mock(RestoreSnapshotRequest.class);
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "intercepting",
                     logger.getName(),
                     Level.DEBUG,
@@ -225,7 +223,7 @@ public void testMaybeInterceptRequest() throws IllegalAccessException {
             );
             operatorPrivilegesService.maybeInterceptRequest(new ThreadContext(Settings.EMPTY), restoreSnapshotRequest);
             verify(restoreSnapshotRequest).skipOperatorOnlyState(licensed);
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         } finally {
             Loggers.setLevel(logger, (Level) null);
         }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java
index ce216b90f6e77..34cfde8dc862f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java
@@ -16,7 +16,7 @@
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.watcher.ResourceWatcherService;
@@ -175,15 +175,14 @@ public void testFileAutoReload() throws Exception {
         Files.copy(sampleFile, inUseFile, StandardCopyOption.REPLACE_EXISTING);

         final Logger logger = LogManager.getLogger(FileOperatorUsersStore.class);
-        final MockLogAppender appender = new MockLogAppender();
         Loggers.setLevel(logger, Level.TRACE);

         try (
-            var ignored = appender.capturing(FileOperatorUsersStore.class);
+            var mockLog = MockLog.capture(FileOperatorUsersStore.class);
             ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool)
         ) {
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "1st file parsing",
                     logger.getName(),
                     Level.INFO,
@@ -209,7 +208,7 @@ public void testFileAutoReload() throws Exception {
                 groups.get(2)
             );
             assertEquals(new FileOperatorUsersStore.Group(Set.of("me@elastic.co"), "jwt1", "jwt", "realm", null, null), groups.get(3));
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

             // Content does not change, the groups should not be updated
             try (BufferedWriter writer = Files.newBufferedWriter(inUseFile, StandardCharsets.UTF_8, StandardOpenOption.APPEND)) {
@@ -217,11 +216,11 @@ public void testFileAutoReload() throws Exception {
             }
             watcherService.notifyNow(ResourceWatcherService.Frequency.HIGH);
             assertSame(groups, fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups());
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

             // Add one more entry
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "updating",
                     logger.getName(),
                     Level.INFO,
@@ -236,11 +235,11 @@ public void testFileAutoReload() throws Exception {
                 assertEquals(5, newGroups.size());
                 assertEquals(new FileOperatorUsersStore.Group(Set.of("operator_4")), newGroups.get(4));
             });
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

             // Add mal-formatted entry
-            appender.addExpectation(
-                new MockLogAppender.ExceptionSeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.ExceptionSeenEventExpectation(
                     "mal-formatted",
                     logger.getName(),
                     Level.ERROR,
@@ -254,11 +253,11 @@ public void testFileAutoReload() throws Exception {
             }
             watcherService.notifyNow(ResourceWatcherService.Frequency.HIGH);
             assertEquals(5, fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups().size());
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

             // Delete the file will remove all the operator users
-            appender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "file not exist warning",
                     logger.getName(),
                     Level.WARN,
@@ -268,7 +267,7 @@ public void testFileAutoReload() throws Exception {
             );
             Files.delete(inUseFile);
             assertBusy(() -> assertEquals(0, fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups().size()));
-            appender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();

             // Back to original content
             Files.copy(sampleFile, inUseFile, StandardCopyOption.REPLACE_EXISTING);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageCertificateVerificationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageCertificateVerificationTests.java
index 19ddb70315388..ad8e15db6f032 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageCertificateVerificationTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageCertificateVerificationTests.java
@@ -14,8 +14,6 @@
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.logging.log4j.Level;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.settings.Settings;
@@ -26,7 +24,7 @@
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.http.MockResponse;
 import org.elasticsearch.test.http.MockWebServer;
 import org.elasticsearch.xpack.core.common.socket.SocketAccess;
@@ -119,21 +117,18 @@ public void testDiagnosticTrustManagerForHostnameVerificationFailure() throws Ex
         final SslConfiguration clientSslConfig = sslService.getSSLConfiguration(HTTP_CLIENT_SSL);
         final SSLSocketFactory clientSocketFactory = sslService.sslSocketFactory(clientSslConfig);

-        final Logger diagnosticLogger = LogManager.getLogger(DiagnosticTrustManager.class);
-        final MockLogAppender mockAppender = new MockLogAppender();
-
         // Apache clients implement their own hostname checking, but we don't want that.
         // We use a raw socket so we get the builtin JDK checking (which is what we use for transport protocol SSL checks)
         try (
-            var ignored = mockAppender.capturing(DiagnosticTrustManager.class);
+            var mockLog = MockLog.capture(DiagnosticTrustManager.class);
             MockWebServer webServer = initWebServer(sslService);
             SSLSocket clientSocket = (SSLSocket) clientSocketFactory.createSocket()
         ) {
             String fileName = "/x-pack/plugin/security/build/resources/test/org/elasticsearch/xpack/ssl/SSLErrorMessageTests/ca1.crt"
                 .replace('/', platformFileSeparator());
-            mockAppender.addExpectation(
-                new MockLogAppender.PatternSeenEventExpectation(
+            mockLog.addExpectation(
+                new MockLog.PatternSeenEventExpectation(
                     "ssl diagnostic",
                     DiagnosticTrustManager.class.getName(),
                     Level.WARN,
@@ -168,7 +163,7 @@ public void testDiagnosticTrustManagerForHostnameVerificationFailure() throws Ex

             // Logging message failures are tricky to debug because you just get a "didn't find match" assertion failure.
             // You should be able to check the log output for the text that was logged and compare to the regex above.
-            mockAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }
     }

diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java
index 4b59f28a6792d..b9cde5d3a6b09 100644
--- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java
+++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java
@@ -67,7 +67,7 @@
 import org.elasticsearch.snapshots.SnapshotInfo;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;
-import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.test.junit.annotations.TestIssueLogging;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.MockTransportService;
@@ -376,10 +376,9 @@ public void testFallbacksToSourceNodeWhenSnapshotDownloadFails() throws Exceptio
         createSnapshot(repoName, "snap", Collections.singletonList(indexName));

         String targetNode;
-        final var mockLogAppender = new MockLogAppender();
-        try (var ignored = mockLogAppender.capturing(RecoverySourceHandler.class)) {
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(RecoverySourceHandler.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected warn log about restore failure",
                     RecoverySourceHandler.class.getName(),
                     Level.WARN,
@@ -392,7 +391,7 @@ public void testFallbacksToSourceNodeWhenSnapshotDownloadFails() throws Exceptio

             ensureGreen();

-            mockLogAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
         }

         RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0);
@@ -611,28 +610,22 @@ public void testRecoveryIsCancelledAfterDeletingTheIndex() throws Exception {

         recoverSnapshotFileRequestReceived.await();

-        final var mockLogAppender = new MockLogAppender();
-        try (var ignored = mockLogAppender.capturing(RecoverySourceHandler.class)) {
-            mockLogAppender.addExpectation(
-                new MockLogAppender.SeenEventExpectation(
+        try (var mockLog = MockLog.capture(RecoverySourceHandler.class)) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
                     "expected debug log about restore cancellation",
                     RecoverySourceHandler.class.getName(),
                     Level.DEBUG,
                     "cancelled while recovering file [*] from snapshot"
                 )
             );
-            mockLogAppender.addExpectation(
-                new MockLogAppender.UnseenEventExpectation(
-                    "expected no WARN logs",
-                    RecoverySourceHandler.class.getName(),
-                    Level.WARN,
-                    "*"
-                )
+            mockLog.addExpectation(
+                new MockLog.UnseenEventExpectation("expected no WARN logs", RecoverySourceHandler.class.getName(), Level.WARN, "*")
             );

             assertAcked(indicesAdmin().prepareDelete(indexName).get());

-            assertBusy(mockLogAppender::assertAllExpectationsMatched);
+            assertBusy(mockLog::assertAllExpectationsMatched);
         }

         respondToRecoverSnapshotFile.countDown();
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java
index 98a7938c12a1f..1c38ed50ede39 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java
@@ -28,8 +28,8 @@
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.MockLogAppender;
-import org.elasticsearch.test.MockLogAppender.LoggingExpectation;
+import org.elasticsearch.test.MockLog;
+import org.elasticsearch.test.MockLog.LoggingExpectation;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.ActionNotFoundTransportException;
 import org.elasticsearch.xpack.core.transform.action.GetCheckpointAction;
@@ -103,7 +103,7 @@ public void testReportSourceIndexChangesRunsEmpty() {
         DefaultCheckpointProvider provider = newCheckpointProvider(transformConfig);

         assertExpectation(
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "warn when source is empty",
                 checkpointProviderLogger.getName(),
                 Level.WARN,
@@ -121,7 +121,7 @@ public void testReportSourceIndexChangesRunsEmpty() {
         );

         assertExpectation(
-            new MockLogAppender.UnseenEventExpectation(
+            new MockLog.UnseenEventExpectation(
                 "do not warn if empty again",
                 checkpointProviderLogger.getName(),
                 Level.WARN,
@@ -145,7 +145,7 @@ public void testReportSourceIndexChangesAddDelete() {
         DefaultCheckpointProvider provider = newCheckpointProvider(transformConfig);

         assertExpectation(
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "info about adds/removal",
                 checkpointProviderLogger.getName(),
                 Level.DEBUG,
@@ -163,7 +163,7 @@ public void testReportSourceIndexChangesAddDelete() {
         );

         assertExpectation(
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "info about adds/removal",
                 checkpointProviderLogger.getName(),
                 Level.DEBUG,
@@ -180,7 +180,7 @@ public void testReportSourceIndexChangesAddDelete() {
             }
         );
         assertExpectation(
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "info about adds/removal",
                 checkpointProviderLogger.getName(),
                 Level.DEBUG,
@@ -213,7 +213,7 @@ public void testReportSourceIndexChangesAddDeleteMany() {
         }

         assertExpectation(
-            new MockLogAppender.SeenEventExpectation(
+            new MockLog.SeenEventExpectation(
                 "info about adds/removal",
                 checkpointProviderLogger.getName(),
                 Level.DEBUG,
@@ -468,11 +468,10 @@ private void assertExpectation(LoggingExpectation loggingExpectation, AuditExpec
         transformAuditor.reset();
         transformAuditor.addExpectation(auditExpectation);

-        MockLogAppender mockLogAppender = new MockLogAppender();
-        try (var ignored = mockLogAppender.capturing(checkpointProviderLogger.getName())) {
-            mockLogAppender.addExpectation(loggingExpectation);
+        try (var mockLog = MockLog.capture(checkpointProviderLogger.getName())) {
+            mockLog.addExpectation(loggingExpectation);
             codeBlock.run();
-            mockLogAppender.assertAllExpectationsMatched();
+            mockLog.assertAllExpectationsMatched();
             transformAuditor.assertAllExpectationsMatched();
         }
     }